[PATCH] x86_64: Convert page fault error codes to symbolic constants.
Much better to deal with these than with the magic numbers.

And remove the comment describing the bits - kernel source
is no replacement for an architecture manual.

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
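
For reference, the new symbolic masks are bit-for-bit identical to the magic numbers they replace. A minimal standalone sketch (not part of the patch; the PF_* names simply mirror the defines added below) checks the equivalences used in the diff:

#include <assert.h>

/* Mirrors the constants added to arch/x86_64/mm/fault.c */
#define PF_PROT		(1<<0)
#define PF_WRITE	(1<<1)
#define PF_USER		(1<<2)
#define PF_RSVD		(1<<3)
#define PF_INSTR	(1<<4)

int main(void)
{
	/* 0xd was the old mask in the vmalloc fast path */
	assert((PF_RSVD | PF_USER | PF_PROT) == 0xd);
	/* 3 was the old mask in the access-type switch */
	assert((PF_PROT | PF_WRITE) == 3);
	/* 4 was the old user-mode test, 1<<3 the reserved-bit test */
	assert(PF_USER == 4 && PF_RSVD == (1 << 3));
	return 0;
}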
Andi Kleen authored and Linus Torvalds committed Jan 12, 2006
1 parent bf2fcc6 commit 66c5815
Showing 1 changed file with 17 additions and 17 deletions.
34 changes: 17 additions & 17 deletions arch/x86_64/mm/fault.c
@@ -35,6 +35,13 @@
#include <asm-generic/sections.h>
#include <asm/kdebug.h>

+/* Page fault error code bits */
+#define PF_PROT (1<<0) /* or no page found */
+#define PF_WRITE (1<<1)
+#define PF_USER (1<<2)
+#define PF_RSVD (1<<3)
+#define PF_INSTR (1<<4)
+
void bust_spinlocks(int yes)
{
int loglevel_save = console_loglevel;
@@ -68,7 +75,7 @@ static noinline int is_prefetch(struct pt_regs *regs, unsigned long addr,
unsigned char *max_instr;

/* If it was a exec fault ignore */
-if (error_code & (1<<4))
+if (error_code & PF_INSTR)
return 0;

instr = (unsigned char *)convert_rip_to_linear(current, regs);
@@ -293,13 +300,6 @@ int exception_trace = 1;
* This routine handles page faults. It determines the address,
* and the problem, and then passes it off to one of the appropriate
* routines.
- *
- * error_code:
- * bit 0 == 0 means no page found, 1 means protection fault
- * bit 1 == 0 means read, 1 means write
- * bit 2 == 0 means kernel, 1 means user-mode
- * bit 3 == 1 means use of reserved bit detected
- * bit 4 == 1 means fault was an instruction fetch
*/
asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
unsigned long error_code)
@@ -350,7 +350,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* is always initialized because it's shared with the main
* kernel text. Only vmalloc may need PML4 syncups.
*/
-if (!(error_code & 0xd) &&
+if (!(error_code & (PF_RSVD|PF_USER|PF_PROT)) &&
((address >= VMALLOC_START && address < VMALLOC_END))) {
if (vmalloc_fault(address) < 0)
goto bad_area_nosemaphore;
@@ -363,7 +363,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
goto bad_area_nosemaphore;
}

-if (unlikely(error_code & (1 << 3)))
+if (unlikely(error_code & PF_RSVD))
pgtable_bad(address, regs, error_code);

/*
@@ -390,7 +390,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
* thus avoiding the deadlock.
*/
if (!down_read_trylock(&mm->mmap_sem)) {
-if ((error_code & 4) == 0 &&
+if ((error_code & PF_USER) == 0 &&
!search_exception_tables(regs->rip))
goto bad_area_nosemaphore;
down_read(&mm->mmap_sem);
@@ -417,17 +417,17 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
good_area:
info.si_code = SEGV_ACCERR;
write = 0;
-switch (error_code & 3) {
+switch (error_code & (PF_PROT|PF_WRITE)) {
default: /* 3: write, present */
/* fall through */
-case 2: /* write, not present */
+case PF_WRITE: /* write, not present */
if (!(vma->vm_flags & VM_WRITE))
goto bad_area;
write++;
break;
-case 1: /* read, present */
+case PF_PROT: /* read, present */
goto bad_area;
-case 0: /* read, not present */
+case 0: /* read, not present */
if (!(vma->vm_flags & (VM_READ | VM_EXEC)))
goto bad_area;
}
@@ -462,7 +462,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,

bad_area_nosemaphore:
/* User mode accesses just cause a SIGSEGV */
-if (error_code & 4) {
+if (error_code & PF_USER) {
if (is_prefetch(regs, address, error_code))
return;

@@ -558,7 +558,7 @@ asmlinkage void __kprobes do_page_fault(struct pt_regs *regs,
up_read(&mm->mmap_sem);

/* Kernel mode? Handle exceptions or die */
-if (!(error_code & 4))
+if (!(error_code & PF_USER))
goto no_context;

tsk->thread.cr2 = address;
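As a usage note, the symbolic names also make ad-hoc decoding of a fault's error code readable. The helper below is hypothetical (it is not in the kernel); it restates the PF_* defines so it builds standalone, with the bit meanings taken from the comment the patch deletes:

#include <stdio.h>

#define PF_PROT		(1<<0)	/* set: protection fault; clear: page not present */
#define PF_WRITE	(1<<1)	/* set: write access; clear: read */
#define PF_USER		(1<<2)	/* set: fault taken in user mode */
#define PF_RSVD		(1<<3)	/* set: reserved bit detected in a paging entry */
#define PF_INSTR	(1<<4)	/* set: fault was an instruction fetch */

static void decode_page_fault(unsigned long error_code)
{
	printf("%s-mode %s, %s%s%s\n",
	       error_code & PF_USER  ? "user"  : "kernel",
	       error_code & PF_WRITE ? "write" : "read",
	       error_code & PF_PROT  ? "protection fault" : "page not present",
	       error_code & PF_RSVD  ? ", reserved bit set" : "",
	       error_code & PF_INSTR ? ", instruction fetch" : "");
}

int main(void)
{
	decode_page_fault(PF_USER | PF_WRITE);	/* user-mode write, page not present */
	return 0;
}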
