x86: do_page_fault small unification

Copy the prefetch of mmap_sem from X86_64 and move the
notify_page_fault() check (soon to be kprobe_handle_fault()) out of
the unlikely if() statement.

This makes the X86_32|64 pagefault handlers closer to each
other.

Signed-off-by: Harvey Harrison <harvey.harrison@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Harvey Harrison authored and Ingo Molnar committed Jan 30, 2008
1 parent f2857ce · commit 608566b
Showing 2 changed files with 9 additions and 14 deletions.
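For orientation before the hunks: after this commit, both versions of do_page_fault() open with the same sequence. Below is a simplified sketch of that shared prologue, assembled from the diffs that follow; it is an excerpt, not a standalone translation unit, and assumes the locals (tsk, mm, address, si_code) declared earlier in each handler.

	tsk = current;
	mm = tsk->mm;
	/* Warm the cache line of mmap_sem, which the handler will almost always take. */
	prefetchw(&mm->mmap_sem);

	/* The faulting address is delivered in CR2; save it before IRQs may be enabled. */
	address = read_cr2();

	si_code = SEGV_MAPERR;

	/* Give kprobes first claim on the fault; if it handled it, we are done. */
	if (notify_page_fault(regs))
		return;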
16 changes: 7 additions & 9 deletions arch/x86/mm/fault_32.c
@@ -295,13 +295,18 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 	 */
 	trace_hardirqs_fixup();
 
+	tsk = current;
+	mm = tsk->mm;
+	prefetchw(&mm->mmap_sem);
+
 	/* get the address */
 	address = read_cr2();
 
-	tsk = current;
-
 	si_code = SEGV_MAPERR;
 
+	if (notify_page_fault(regs))
+		return;
+
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
 	 * 'reference' page table is init_mm.pgd.
@@ -319,25 +324,18 @@ void __kprobes do_page_fault(struct pt_regs *regs, unsigned long error_code)
 		if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
 		    vmalloc_fault(address) >= 0)
 			return;
-		if (notify_page_fault(regs))
-			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
 		 * fault we could otherwise deadlock.
 		 */
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(regs))
-		return;
-
 	/* It's safe to allow irq's after cr2 has been saved and the vmalloc
 	   fault has been handled. */
 	if (regs->flags & (X86_EFLAGS_IF|VM_MASK))
 		local_irq_enable();
 
-	mm = tsk->mm;
-
 	/*
 	 * If we're in an interrupt, have no user context or are running in an
 	 * atomic region then we must not take the fault.
7 changes: 2 additions & 5 deletions arch/x86/mm/fault_64.c
@@ -355,6 +355,8 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 
 	si_code = SEGV_MAPERR;
 
+	if (notify_page_fault(regs))
+		return;
 
 	/*
 	 * We fault-in kernel-space virtual memory on-demand. The
@@ -380,18 +382,13 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
 			if (vmalloc_fault(address) >= 0)
 				return;
 		}
-		if (notify_page_fault(regs))
-			return;
 		/*
 		 * Don't take the mm semaphore here. If we fixup a prefetch
 		 * fault we could otherwise deadlock.
 		 */
 		goto bad_area_nosemaphore;
 	}
 
-	if (notify_page_fault(regs))
-		return;
-
 	if (likely(regs->flags & X86_EFLAGS_IF))
 		local_irq_enable();
 
