Skip to content

Commit

Permalink
[PATCH] mm: pagefault_{disable,enable}()
Browse files Browse the repository at this point in the history
Introduce pagefault_{disable,enable}() and use these where previously we did
manual preempt increments/decrements to make the pagefault handler do the
atomic thing.

Currently they still rely on the increased preempt count, but do not rely on
the disabled preemption, this might go away in the future.

(NOTE: the extra barrier() in pagefault_disable might fix some holes on
       machines which have too many registers for their own good)

[heiko.carstens@de.ibm.com: s390 fix]
Signed-off-by: Peter Zijlstra <a.p.zijlstra@chello.nl>
Acked-by: Nick Piggin <npiggin@suse.de>
Cc: Martin Schwidefsky <schwidefsky@de.ibm.com>
Signed-off-by: Heiko Carstens <heiko.carstens@de.ibm.com>
Signed-off-by: Andrew Morton <akpm@osdl.org>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
  • Loading branch information
Peter Zijlstra authored and Linus Torvalds committed Dec 7, 2006
1 parent 6edaf68 commit a866374
Show file tree
Hide file tree
Showing 17 changed files with 88 additions and 62 deletions.
4 changes: 2 additions & 2 deletions arch/frv/kernel/futex.c
Original file line number Diff line number Diff line change
Expand Up @@ -200,7 +200,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;

inc_preempt_count();
pagefault_disable();

switch (op) {
case FUTEX_OP_SET:
Expand All @@ -223,7 +223,7 @@ int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
break;
}

dec_preempt_count();
pagefault_enable();

if (!ret) {
switch (cmp) {
Expand Down
10 changes: 4 additions & 6 deletions arch/i386/mm/highmem.c
Original file line number Diff line number Diff line change
Expand Up @@ -32,7 +32,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr;

/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
inc_preempt_count();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);

Expand All @@ -52,8 +52,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)

#ifdef CONFIG_DEBUG_HIGHMEM
if (vaddr >= PAGE_OFFSET && vaddr < (unsigned long)high_memory) {
dec_preempt_count();
preempt_check_resched();
pagefault_enable();
return;
}

Expand All @@ -68,8 +67,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
*/
kpte_clear_flush(kmap_pte-idx, vaddr);

dec_preempt_count();
preempt_check_resched();
pagefault_enable();
}

/* This is the same as kmap_atomic() but can map memory that doesn't
Expand All @@ -80,7 +78,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
enum fixed_addresses idx;
unsigned long vaddr;

inc_preempt_count();
pagefault_disable();

idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
Expand Down
10 changes: 4 additions & 6 deletions arch/mips/mm/highmem.c
Original file line number Diff line number Diff line change
Expand Up @@ -39,7 +39,7 @@ void *__kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr;

/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
inc_preempt_count();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);

Expand All @@ -62,8 +62,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
enum fixed_addresses idx = type + KM_TYPE_NR*smp_processor_id();

if (vaddr < FIXADDR_START) { // FIXME
dec_preempt_count();
preempt_check_resched();
pagefault_enable();
return;
}

Expand All @@ -78,8 +77,7 @@ void __kunmap_atomic(void *kvaddr, enum km_type type)
local_flush_tlb_one(vaddr);
#endif

dec_preempt_count();
preempt_check_resched();
pagefault_enable();
}

#ifndef CONFIG_LIMITED_DMA
Expand All @@ -92,7 +90,7 @@ void *kmap_atomic_pfn(unsigned long pfn, enum km_type type)
enum fixed_addresses idx;
unsigned long vaddr;

inc_preempt_count();
pagefault_disable();

idx = type + KM_TYPE_NR*smp_processor_id();
vaddr = __fix_to_virt(FIX_KMAP_BEGIN + idx);
Expand Down
6 changes: 3 additions & 3 deletions arch/s390/lib/uaccess_std.c
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@

#include <linux/errno.h>
#include <linux/mm.h>
#include <asm/uaccess.h>
#include <linux/uaccess.h>
#include <asm/futex.h>

#ifndef __s390x__
Expand Down Expand Up @@ -258,7 +258,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
{
int oldval = 0, newval, ret;

inc_preempt_count();
pagefault_disable();

switch (op) {
case FUTEX_OP_SET:
Expand All @@ -284,7 +284,7 @@ int futex_atomic_op(int op, int __user *uaddr, int oparg, int *old)
default:
ret = -ENOSYS;
}
dec_preempt_count();
pagefault_enable();
*old = oldval;
return ret;
}
Expand Down
8 changes: 3 additions & 5 deletions arch/sparc/mm/highmem.c
Original file line number Diff line number Diff line change
Expand Up @@ -35,7 +35,7 @@ void *kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr;

/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
inc_preempt_count();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);

Expand Down Expand Up @@ -70,8 +70,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned long idx = type + KM_TYPE_NR*smp_processor_id();

if (vaddr < FIXADDR_START) { // FIXME
dec_preempt_count();
preempt_check_resched();
pagefault_enable();
return;
}

Expand All @@ -97,8 +96,7 @@ void kunmap_atomic(void *kvaddr, enum km_type type)
#endif
#endif

dec_preempt_count();
preempt_check_resched();
pagefault_enable();
}

/* We may be fed a pagetable here by ptep_to_xxx and others. */
Expand Down
5 changes: 2 additions & 3 deletions include/asm-frv/highmem.h
Original file line number Diff line number Diff line change
Expand Up @@ -115,7 +115,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
{
unsigned long paddr;

inc_preempt_count();
pagefault_disable();
paddr = page_to_phys(page);

switch (type) {
Expand Down Expand Up @@ -170,8 +170,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
default:
BUG();
}
dec_preempt_count();
preempt_check_resched();
pagefault_enable();
}

#endif /* !__ASSEMBLY__ */
Expand Down
4 changes: 2 additions & 2 deletions include/asm-generic/futex.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;

inc_preempt_count();
pagefault_disable();

switch (op) {
case FUTEX_OP_SET:
Expand All @@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}

dec_preempt_count();
pagefault_enable();

if (!ret) {
switch (cmp) {
Expand Down
4 changes: 2 additions & 2 deletions include/asm-i386/futex.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;

inc_preempt_count();
pagefault_disable();

if (op == FUTEX_OP_SET)
__futex_atomic_op1("xchgl %0, %2", ret, oldval, uaddr, oparg);
Expand Down Expand Up @@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
}
}

dec_preempt_count();
pagefault_enable();

if (!ret) {
switch (cmp) {
Expand Down
4 changes: 2 additions & 2 deletions include/asm-ia64/futex.h
Original file line number Diff line number Diff line change
Expand Up @@ -59,7 +59,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;

inc_preempt_count();
pagefault_disable();

switch (op) {
case FUTEX_OP_SET:
Expand All @@ -83,7 +83,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}

dec_preempt_count();
pagefault_enable();

if (!ret) {
switch (cmp) {
Expand Down
4 changes: 2 additions & 2 deletions include/asm-mips/futex.h
Original file line number Diff line number Diff line change
Expand Up @@ -88,7 +88,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;

inc_preempt_count();
pagefault_disable();

switch (op) {
case FUTEX_OP_SET:
Expand All @@ -115,7 +115,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}

dec_preempt_count();
pagefault_enable();

if (!ret) {
switch (cmp) {
Expand Down
4 changes: 2 additions & 2 deletions include/asm-parisc/futex.h
Original file line number Diff line number Diff line change
Expand Up @@ -21,7 +21,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;

inc_preempt_count();
pagefault_disable();

switch (op) {
case FUTEX_OP_SET:
Expand All @@ -33,7 +33,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}

dec_preempt_count();
pagefault_enable();

if (!ret) {
switch (cmp) {
Expand Down
4 changes: 2 additions & 2 deletions include/asm-powerpc/futex.h
Original file line number Diff line number Diff line change
Expand Up @@ -43,7 +43,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;

inc_preempt_count();
pagefault_disable();

switch (op) {
case FUTEX_OP_SET:
Expand All @@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}

dec_preempt_count();
pagefault_enable();

if (!ret) {
switch (cmp) {
Expand Down
8 changes: 3 additions & 5 deletions include/asm-ppc/highmem.h
Original file line number Diff line number Diff line change
Expand Up @@ -79,7 +79,7 @@ static inline void *kmap_atomic(struct page *page, enum km_type type)
unsigned long vaddr;

/* even !CONFIG_PREEMPT needs this, for in_atomic in do_page_fault */
inc_preempt_count();
pagefault_disable();
if (!PageHighMem(page))
return page_address(page);

Expand All @@ -101,8 +101,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
unsigned int idx = type + KM_TYPE_NR*smp_processor_id();

if (vaddr < KMAP_FIX_BEGIN) { // FIXME
dec_preempt_count();
preempt_check_resched();
pagefault_enable();
return;
}

Expand All @@ -115,8 +114,7 @@ static inline void kunmap_atomic(void *kvaddr, enum km_type type)
pte_clear(&init_mm, vaddr, kmap_pte+idx);
flush_tlb_page(NULL, vaddr);
#endif
dec_preempt_count();
preempt_check_resched();
pagefault_enable();
}

static inline struct page *kmap_atomic_to_page(void *ptr)
Expand Down
4 changes: 2 additions & 2 deletions include/asm-sparc64/futex.h
Original file line number Diff line number Diff line change
Expand Up @@ -45,7 +45,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28))
oparg = 1 << oparg;

inc_preempt_count();
pagefault_disable();

switch (op) {
case FUTEX_OP_SET:
Expand All @@ -67,7 +67,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}

dec_preempt_count();
pagefault_enable();

if (!ret) {
switch (cmp) {
Expand Down
4 changes: 2 additions & 2 deletions include/asm-x86_64/futex.h
Original file line number Diff line number Diff line change
Expand Up @@ -55,7 +55,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int)))
return -EFAULT;

inc_preempt_count();
pagefault_disable();

switch (op) {
case FUTEX_OP_SET:
Expand All @@ -78,7 +78,7 @@ futex_atomic_op_inuser (int encoded_op, int __user *uaddr)
ret = -ENOSYS;
}

dec_preempt_count();
pagefault_enable();

if (!ret) {
switch (cmp) {
Expand Down
39 changes: 37 additions & 2 deletions include/linux/uaccess.h
Original file line number Diff line number Diff line change
@@ -1,8 +1,43 @@
#ifndef __LINUX_UACCESS_H__
#define __LINUX_UACCESS_H__

#include <linux/preempt.h>
#include <asm/uaccess.h>

/*
* These routines enable/disable the pagefault handler in that
* it will not take any locks and go straight to the fixup table.
*
* They have great resemblance to the preempt_disable/enable calls
* and in fact they are identical; this is because currently there is
* no other way to make the pagefault handlers do this. So we do
* disable preemption but we don't necessarily care about that.
*/
/*
 * pagefault_disable() - make the pagefault handler take the atomic path.
 *
 * Per the comment above: raises the preempt count so the fault handler
 * will not take any locks and goes straight to the fixup table.  Callers
 * currently also get disabled preemption as a side effect, but should
 * not rely on that (it "might go away in the future" per the commit log).
 */
static inline void pagefault_disable(void)
{
	inc_preempt_count();
	/*
	 * Compiler barrier: make sure the preempt-count store has been
	 * issued before any subsequent user access can fault.
	 */
	barrier();
}

/*
 * pagefault_enable() - undo pagefault_disable().
 *
 * Lowers the preempt count again and then re-checks for a pending
 * reschedule that may have been deferred while the count was raised.
 * The barrier/decrement/barrier ordering below is deliberate; do not
 * reorder these statements.
 */
static inline void pagefault_enable(void)
{
	/*
	 * make sure to issue those last loads/stores before enabling
	 * the pagefault handler again.
	 */
	barrier();
	dec_preempt_count();
	/*
	 * Compiler barrier again: make sure the decrement is issued
	 * before the resched check below sees the count.
	 */
	barrier();
	preempt_check_resched();
}

#ifndef ARCH_HAS_NOCACHE_UACCESS

static inline unsigned long __copy_from_user_inatomic_nocache(void *to,
Expand Down Expand Up @@ -35,9 +70,9 @@ static inline unsigned long __copy_from_user_nocache(void *to,
({ \
long ret; \
\
inc_preempt_count(); \
pagefault_disable(); \
ret = __get_user(retval, addr); \
dec_preempt_count(); \
pagefault_enable(); \
ret; \
})

Expand Down
Loading

0 comments on commit a866374

Please sign in to comment.