diff --git a/[refs] b/[refs] index a0c077f5dcf4..97cd39a6089a 100644 --- a/[refs] +++ b/[refs] @@ -1,2 +1,2 @@ --- -refs/heads/master: 8d7718aa082aaf30a0b4989e1f04858952f941bc +refs/heads/master: 2e12978a9f7a7abd54e8eb9ce70a7718767b8b2c diff --git a/trunk/arch/alpha/include/asm/futex.h b/trunk/arch/alpha/include/asm/futex.h index e8a761aee088..945de222ab91 100644 --- a/trunk/arch/alpha/include/asm/futex.h +++ b/trunk/arch/alpha/include/asm/futex.h @@ -29,7 +29,7 @@ : "r" (uaddr), "r"(oparg) \ : "memory") -static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -39,7 +39,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); @@ -81,23 +81,21 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { - int ret = 0, cmp; - u32 prev; + int prev, cmp; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; __asm__ __volatile__ ( __ASM_SMP_MB - "1: ldl_l %1,0(%3)\n" - " cmpeq %1,%4,%2\n" - " beq %2,3f\n" - " mov %5,%2\n" - "2: stl_c %2,0(%3)\n" - " beq %2,4f\n" + "1: ldl_l %0,0(%2)\n" + " cmpeq %0,%3,%1\n" + " beq %1,3f\n" + " mov %4,%1\n" + "2: stl_c %1,0(%2)\n" + " beq %1,4f\n" "3: .subsection 2\n" "4: br 1b\n" " .previous\n" @@ -107,12 +105,11 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " .long 2b-.\n" " lda $31,3b-2b(%0)\n" " .previous\n" - : "+r"(ret), "=&r"(prev), "=&r"(cmp) + : "=&r"(prev), "=&r"(cmp) : "r"(uaddr), "r"((long)oldval), "r"(newval) : "memory"); - *uval = prev; - return ret; + return prev; } #endif /* __KERNEL__ */ diff --git a/trunk/arch/arm/include/asm/futex.h b/trunk/arch/arm/include/asm/futex.h index 0e29d8e6a5c2..b33fe7065b38 100644 --- a/trunk/arch/arm/include/asm/futex.h +++ b/trunk/arch/arm/include/asm/futex.h @@ -35,7 +35,7 @@ : "cc", "memory") static inline int -futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -46,7 +46,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); /* implies preempt_disable() */ @@ -88,38 +88,36 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { - int ret = 0; - u32 val; + int val; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - /* Note that preemption is disabled by futex_atomic_cmpxchg_inatomic - * call sites. 
*/ + pagefault_disable(); /* implies preempt_disable() */ __asm__ __volatile__("@futex_atomic_cmpxchg_inatomic\n" - "1: " T(ldr) " %1, [%4]\n" - " teq %1, %2\n" + "1: " T(ldr) " %0, [%3]\n" + " teq %0, %1\n" " it eq @ explicit IT needed for the 2b label\n" - "2: " T(streq) " %3, [%4]\n" + "2: " T(streq) " %2, [%3]\n" "3:\n" " .pushsection __ex_table,\"a\"\n" " .align 3\n" " .long 1b, 4f, 2b, 4f\n" " .popsection\n" " .pushsection .fixup,\"ax\"\n" - "4: mov %0, %5\n" + "4: mov %0, %4\n" " b 3b\n" " .popsection" - : "+r" (ret), "=&r" (val) + : "=&r" (val) : "r" (oldval), "r" (newval), "r" (uaddr), "Ir" (-EFAULT) : "cc", "memory"); - *uval = val; - return ret; + pagefault_enable(); /* subsumes preempt_enable() */ + + return val; } #endif /* !SMP */ diff --git a/trunk/arch/frv/include/asm/futex.h b/trunk/arch/frv/include/asm/futex.h index 4bea27f50a7a..08b3d1da3583 100644 --- a/trunk/arch/frv/include/asm/futex.h +++ b/trunk/arch/frv/include/asm/futex.h @@ -7,11 +7,10 @@ #include #include -extern int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr); +extern int futex_atomic_op_inuser(int encoded_op, int __user *uaddr); static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { return -ENOSYS; } diff --git a/trunk/arch/frv/kernel/futex.c b/trunk/arch/frv/kernel/futex.c index d155ca9e5098..14f64b054c7e 100644 --- a/trunk/arch/frv/kernel/futex.c +++ b/trunk/arch/frv/kernel/futex.c @@ -18,7 +18,7 @@ * the various futex operations; MMU fault checking is ignored under no-MMU * conditions */ -static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *_oldval) { int oldval, ret; @@ -50,7 +50,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *_oldval) { int oldval, ret; @@ -83,7 +83,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *_oldval) { int oldval, ret; @@ -116,7 +116,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, int *_ol return ret; } -static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *_oldval) { int oldval, ret; @@ -149,7 +149,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, int *_o return ret; } -static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_oldval) +static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *_oldval) { int oldval, ret; @@ -186,7 +186,7 @@ static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, int *_o /* * do the futex operations */ -int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -197,7 +197,7 @@ int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if 
(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); diff --git a/trunk/arch/ia64/include/asm/futex.h b/trunk/arch/ia64/include/asm/futex.h index 8428525ddb22..c7f0f062239c 100644 --- a/trunk/arch/ia64/include/asm/futex.h +++ b/trunk/arch/ia64/include/asm/futex.h @@ -46,7 +46,7 @@ do { \ } while (0) static inline int -futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -56,7 +56,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); @@ -100,26 +100,23 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; { - register unsigned long r8 __asm ("r8") = 0; - unsigned long prev; + register unsigned long r8 __asm ("r8"); __asm__ __volatile__( " mf;; \n" " mov ar.ccv=%3;; \n" "[1:] cmpxchg4.acq %0=[%1],%2,ar.ccv \n" " .xdata4 \"__ex_table\", 1b-., 2f-. \n" "[2:]" - : "=r" (prev) + : "=r" (r8) : "r" (uaddr), "r" (newval), "rO" ((long) (unsigned) oldval) : "memory"); - *uval = prev; return r8; } } diff --git a/trunk/arch/microblaze/include/asm/futex.h b/trunk/arch/microblaze/include/asm/futex.h index b0526d2716fa..ad3fd61b2fe7 100644 --- a/trunk/arch/microblaze/include/asm/futex.h +++ b/trunk/arch/microblaze/include/asm/futex.h @@ -29,7 +29,7 @@ }) static inline int -futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +futex_atomic_op_inuser(int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -39,7 +39,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); @@ -94,34 +94,31 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { - int ret = 0, cmp; - u32 prev; + int prev, cmp; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - __asm__ __volatile__ ("1: lwx %1, %3, r0; \ - cmp %2, %1, %4; \ - beqi %2, 3f; \ - 2: swx %5, %3, r0; \ - addic %2, r0, 0; \ - bnei %2, 1b; \ + __asm__ __volatile__ ("1: lwx %0, %2, r0; \ + cmp %1, %0, %3; \ + beqi %1, 3f; \ + 2: swx %4, %2, r0; \ + addic %1, r0, 0; \ + bnei %1, 1b; \ 3: \ .section .fixup,\"ax\"; \ 4: brid 3b; \ - addik %0, r0, %6; \ + addik %0, r0, %5; \ .previous; \ .section __ex_table,\"a\"; \ .word 1b,4b,2b,4b; \ .previous;" \ - : "+r" (ret), "=&r" (prev), "=&r"(cmp) \ + : "=&r" (prev), "=&r"(cmp) \ : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT)); - *uval = prev; - return ret; + return prev; } #endif /* __KERNEL__ */ diff --git a/trunk/arch/mips/include/asm/futex.h 
b/trunk/arch/mips/include/asm/futex.h index 6ebf1734b411..b9cce90346cf 100644 --- a/trunk/arch/mips/include/asm/futex.h +++ b/trunk/arch/mips/include/asm/futex.h @@ -75,7 +75,7 @@ } static inline int -futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +futex_atomic_op_inuser(int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -85,7 +85,7 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); @@ -132,13 +132,11 @@ futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { - int ret = 0; - u32 val; + int retval; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; if (cpu_has_llsc && R10000_LLSC_WAR) { @@ -147,25 +145,25 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " .set push \n" " .set noat \n" " .set mips3 \n" - "1: ll %1, %3 \n" - " bne %1, %z4, 3f \n" + "1: ll %0, %2 \n" + " bne %0, %z3, 3f \n" " .set mips0 \n" - " move $1, %z5 \n" + " move $1, %z4 \n" " .set mips3 \n" - "2: sc $1, %2 \n" + "2: sc $1, %1 \n" " beqzl $1, 1b \n" __WEAK_LLSC_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" - "4: li %0, %6 \n" + "4: li %0, %5 \n" " j 3b \n" " .previous \n" " .section __ex_table,\"a\" \n" " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "+r" (ret), "=&r" (val), "=R" (*uaddr) + : "=&r" (retval), "=R" (*uaddr) : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else if (cpu_has_llsc) { @@ -174,32 +172,31 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, " .set push \n" " .set noat \n" " .set mips3 \n" - "1: ll %1, %3 \n" - " bne %1, %z4, 3f \n" + "1: ll %0, %2 \n" + " bne %0, %z3, 3f \n" " .set mips0 \n" - " move $1, %z5 \n" + " move $1, %z4 \n" " .set mips3 \n" - "2: sc $1, %2 \n" + "2: sc $1, %1 \n" " beqz $1, 1b \n" __WEAK_LLSC_MB "3: \n" " .set pop \n" " .section .fixup,\"ax\" \n" - "4: li %0, %6 \n" + "4: li %0, %5 \n" " j 3b \n" " .previous \n" " .section __ex_table,\"a\" \n" " "__UA_ADDR "\t1b, 4b \n" " "__UA_ADDR "\t2b, 4b \n" " .previous \n" - : "+r" (ret), "=&r" (val), "=R" (*uaddr) + : "=&r" (retval), "=R" (*uaddr) : "R" (*uaddr), "Jr" (oldval), "Jr" (newval), "i" (-EFAULT) : "memory"); } else return -ENOSYS; - *uval = val; - return ret; + return retval; } #endif diff --git a/trunk/arch/parisc/include/asm/futex.h b/trunk/arch/parisc/include/asm/futex.h index 67a33cc27ef2..0c705c3a55ef 100644 --- a/trunk/arch/parisc/include/asm/futex.h +++ b/trunk/arch/parisc/include/asm/futex.h @@ -8,7 +8,7 @@ #include static inline int -futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -18,7 +18,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + if (! 
access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); @@ -51,10 +51,10 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) /* Non-atomic version */ static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { - u32 val; + int err = 0; + int uval; /* futex.c wants to do a cmpxchg_inatomic on kernel NULL, which is * our gateway page, and causes no end of trouble... @@ -62,15 +62,15 @@ futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, if (segment_eq(KERNEL_DS, get_fs()) && !uaddr) return -EFAULT; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - if (get_user(val, uaddr)) - return -EFAULT; - if (val == oldval && put_user(newval, uaddr)) - return -EFAULT; - *uval = val; - return 0; + err = get_user(uval, uaddr); + if (err) return -EFAULT; + if (uval == oldval) + err = put_user(newval, uaddr); + if (err) return -EFAULT; + return uval; } #endif /*__KERNEL__*/ diff --git a/trunk/arch/powerpc/include/asm/futex.h b/trunk/arch/powerpc/include/asm/futex.h index c94e4a3fe2ef..7c589ef81fb0 100644 --- a/trunk/arch/powerpc/include/asm/futex.h +++ b/trunk/arch/powerpc/include/asm/futex.h @@ -30,7 +30,7 @@ : "b" (uaddr), "i" (-EFAULT), "r" (oparg) \ : "cr0", "memory") -static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -40,7 +40,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); @@ -82,38 +82,35 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { - int ret = 0; - u32 prev; + int prev; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; __asm__ __volatile__ ( PPC_RELEASE_BARRIER -"1: lwarx %1,0,%3 # futex_atomic_cmpxchg_inatomic\n\ - cmpw 0,%1,%4\n\ +"1: lwarx %0,0,%2 # futex_atomic_cmpxchg_inatomic\n\ + cmpw 0,%0,%3\n\ bne- 3f\n" - PPC405_ERR77(0,%3) -"2: stwcx. %5,0,%3\n\ + PPC405_ERR77(0,%2) +"2: stwcx. 
%4,0,%2\n\ bne- 1b\n" PPC_ACQUIRE_BARRIER "3: .section .fixup,\"ax\"\n\ -4: li %0,%6\n\ +4: li %0,%5\n\ b 3b\n\ .previous\n\ .section __ex_table,\"a\"\n\ .align 3\n\ " PPC_LONG "1b,4b,2b,4b\n\ .previous" \ - : "+r" (ret), "=&r" (prev), "+m" (*uaddr) + : "=&r" (prev), "+m" (*uaddr) : "r" (uaddr), "r" (oldval), "r" (newval), "i" (-EFAULT) : "cc", "memory"); - *uval = prev; - return ret; + return prev; } #endif /* __KERNEL__ */ diff --git a/trunk/arch/s390/include/asm/futex.h b/trunk/arch/s390/include/asm/futex.h index 81cf36b691f1..5c5d02de49e9 100644 --- a/trunk/arch/s390/include/asm/futex.h +++ b/trunk/arch/s390/include/asm/futex.h @@ -7,7 +7,7 @@ #include #include -static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +static inline int futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -18,7 +18,7 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); @@ -39,13 +39,13 @@ static inline int futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, + int oldval, int newval) { - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - return uaccess.futex_atomic_cmpxchg(uval, uaddr, oldval, newval); + return uaccess.futex_atomic_cmpxchg(uaddr, oldval, newval); } #endif /* __KERNEL__ */ diff --git a/trunk/arch/s390/include/asm/uaccess.h b/trunk/arch/s390/include/asm/uaccess.h index 2d9ea11f919a..d6b1ed0ec52b 100644 --- a/trunk/arch/s390/include/asm/uaccess.h +++ b/trunk/arch/s390/include/asm/uaccess.h @@ -83,8 +83,8 @@ struct uaccess_ops { size_t (*clear_user)(size_t, void __user *); size_t (*strnlen_user)(size_t, const char __user *); size_t (*strncpy_from_user)(size_t, const char __user *, char *); - int (*futex_atomic_op)(int op, u32 __user *, int oparg, int *old); - int (*futex_atomic_cmpxchg)(u32 *, u32 __user *, u32 old, u32 new); + int (*futex_atomic_op)(int op, int __user *, int oparg, int *old); + int (*futex_atomic_cmpxchg)(int __user *, int old, int new); }; extern struct uaccess_ops uaccess; diff --git a/trunk/arch/s390/lib/uaccess.h b/trunk/arch/s390/lib/uaccess.h index 1d2536cb630b..126011df14f1 100644 --- a/trunk/arch/s390/lib/uaccess.h +++ b/trunk/arch/s390/lib/uaccess.h @@ -12,12 +12,12 @@ extern size_t copy_from_user_std(size_t, const void __user *, void *); extern size_t copy_to_user_std(size_t, void __user *, const void *); extern size_t strnlen_user_std(size_t, const char __user *); extern size_t strncpy_from_user_std(size_t, const char __user *, char *); -extern int futex_atomic_cmpxchg_std(u32 *, u32 __user *, u32, u32); -extern int futex_atomic_op_std(int, u32 __user *, int, int *); +extern int futex_atomic_cmpxchg_std(int __user *, int, int); +extern int futex_atomic_op_std(int, int __user *, int, int *); extern size_t copy_from_user_pt(size_t, const void __user *, void *); extern size_t copy_to_user_pt(size_t, void __user *, const void *); -extern int futex_atomic_op_pt(int, u32 __user *, int, int *); -extern int futex_atomic_cmpxchg_pt(u32 *, u32 __user *, u32, u32); +extern int 
futex_atomic_op_pt(int, int __user *, int, int *); +extern int futex_atomic_cmpxchg_pt(int __user *, int, int); #endif /* __ARCH_S390_LIB_UACCESS_H */ diff --git a/trunk/arch/s390/lib/uaccess_pt.c b/trunk/arch/s390/lib/uaccess_pt.c index 74833831417f..404f2de296dc 100644 --- a/trunk/arch/s390/lib/uaccess_pt.c +++ b/trunk/arch/s390/lib/uaccess_pt.c @@ -302,7 +302,7 @@ static size_t copy_in_user_pt(size_t n, void __user *to, : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ "m" (*uaddr) : "cc" ); -static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) +static int __futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) { int oldval = 0, newval, ret; @@ -335,7 +335,7 @@ static int __futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) return ret; } -int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) +int futex_atomic_op_pt(int op, int __user *uaddr, int oparg, int *old) { int ret; @@ -354,29 +354,26 @@ int futex_atomic_op_pt(int op, u32 __user *uaddr, int oparg, int *old) return ret; } -static int __futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +static int __futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) { int ret; asm volatile("0: cs %1,%4,0(%5)\n" - "1: la %0,0\n" + "1: lr %0,%1\n" "2:\n" EX_TABLE(0b,2b) EX_TABLE(1b,2b) : "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "cc", "memory" ); - *uval = oldval; return ret; } -int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +int futex_atomic_cmpxchg_pt(int __user *uaddr, int oldval, int newval) { int ret; if (segment_eq(get_fs(), KERNEL_DS)) - return __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); + return __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); spin_lock(&current->mm->page_table_lock); uaddr = (int __user *) __dat_user_addr((unsigned long) uaddr); if (!uaddr) { @@ -385,7 +382,7 @@ int futex_atomic_cmpxchg_pt(u32 *uval, u32 __user *uaddr, } get_page(virt_to_page(uaddr)); spin_unlock(&current->mm->page_table_lock); - ret = __futex_atomic_cmpxchg_pt(uval, uaddr, oldval, newval); + ret = __futex_atomic_cmpxchg_pt(uaddr, oldval, newval); put_page(virt_to_page(uaddr)); return ret; } diff --git a/trunk/arch/s390/lib/uaccess_std.c b/trunk/arch/s390/lib/uaccess_std.c index bb1a7eed42ce..a6c4f7ed24a4 100644 --- a/trunk/arch/s390/lib/uaccess_std.c +++ b/trunk/arch/s390/lib/uaccess_std.c @@ -255,7 +255,7 @@ size_t strncpy_from_user_std(size_t size, const char __user *src, char *dst) : "0" (-EFAULT), "d" (oparg), "a" (uaddr), \ "m" (*uaddr) : "cc"); -int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old) +int futex_atomic_op_std(int op, int __user *uaddr, int oparg, int *old) { int oldval = 0, newval, ret; @@ -287,21 +287,19 @@ int futex_atomic_op_std(int op, u32 __user *uaddr, int oparg, int *old) return ret; } -int futex_atomic_cmpxchg_std(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +int futex_atomic_cmpxchg_std(int __user *uaddr, int oldval, int newval) { int ret; asm volatile( " sacf 256\n" "0: cs %1,%4,0(%5)\n" - "1: la %0,0\n" + "1: lr %0,%1\n" "2: sacf 0\n" EX_TABLE(0b,2b) EX_TABLE(1b,2b) : "=d" (ret), "+d" (oldval), "=m" (*uaddr) : "0" (-EFAULT), "d" (newval), "a" (uaddr), "m" (*uaddr) : "cc", "memory" ); - *uval = oldval; return ret; } diff --git a/trunk/arch/sh/include/asm/futex-irq.h b/trunk/arch/sh/include/asm/futex-irq.h index 6cb9f193a95e..a9f16a7f9aea 100644 --- a/trunk/arch/sh/include/asm/futex-irq.h +++ 
b/trunk/arch/sh/include/asm/futex-irq.h @@ -3,7 +3,7 @@ #include -static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, +static inline int atomic_futex_op_xchg_set(int oparg, int __user *uaddr, int *oldval) { unsigned long flags; @@ -20,7 +20,7 @@ static inline int atomic_futex_op_xchg_set(int oparg, u32 __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, +static inline int atomic_futex_op_xchg_add(int oparg, int __user *uaddr, int *oldval) { unsigned long flags; @@ -37,7 +37,7 @@ static inline int atomic_futex_op_xchg_add(int oparg, u32 __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, +static inline int atomic_futex_op_xchg_or(int oparg, int __user *uaddr, int *oldval) { unsigned long flags; @@ -54,7 +54,7 @@ static inline int atomic_futex_op_xchg_or(int oparg, u32 __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, +static inline int atomic_futex_op_xchg_and(int oparg, int __user *uaddr, int *oldval) { unsigned long flags; @@ -71,7 +71,7 @@ static inline int atomic_futex_op_xchg_and(int oparg, u32 __user *uaddr, return ret; } -static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, +static inline int atomic_futex_op_xchg_xor(int oparg, int __user *uaddr, int *oldval) { unsigned long flags; @@ -88,13 +88,11 @@ static inline int atomic_futex_op_xchg_xor(int oparg, u32 __user *uaddr, return ret; } -static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval, - u32 __user *uaddr, - u32 oldval, u32 newval) +static inline int atomic_futex_op_cmpxchg_inatomic(int __user *uaddr, + int oldval, int newval) { unsigned long flags; - int ret; - u32 prev = 0; + int ret, prev = 0; local_irq_save(flags); @@ -104,8 +102,10 @@ static inline int atomic_futex_op_cmpxchg_inatomic(u32 *uval, local_irq_restore(flags); - *uval = prev; - return ret; + if (ret) + return ret; + + return prev; } #endif /* __ASM_SH_FUTEX_IRQ_H */ diff --git a/trunk/arch/sh/include/asm/futex.h b/trunk/arch/sh/include/asm/futex.h index 7be39a646fbd..68256ec5fa35 100644 --- a/trunk/arch/sh/include/asm/futex.h +++ b/trunk/arch/sh/include/asm/futex.h @@ -10,7 +10,7 @@ /* XXX: UP variants, fix for SH-4A and SMP.. 
*/ #include -static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -21,7 +21,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); @@ -65,13 +65,12 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - return atomic_futex_op_cmpxchg_inatomic(uval, uaddr, oldval, newval); + return atomic_futex_op_cmpxchg_inatomic(uaddr, oldval, newval); } #endif /* __KERNEL__ */ diff --git a/trunk/arch/sparc/include/asm/futex_64.h b/trunk/arch/sparc/include/asm/futex_64.h index 444e7bea23bc..47f95839dc69 100644 --- a/trunk/arch/sparc/include/asm/futex_64.h +++ b/trunk/arch/sparc/include/asm/futex_64.h @@ -30,7 +30,7 @@ : "r" (uaddr), "r" (oparg), "i" (-EFAULT) \ : "memory") -static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -38,7 +38,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) int cmparg = (encoded_op << 20) >> 20; int oldval = 0, ret, tem; - if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(u32)))) + if (unlikely(!access_ok(VERIFY_WRITE, uaddr, sizeof(int)))) return -EFAULT; if (unlikely((((unsigned long) uaddr) & 0x3UL))) return -EINVAL; @@ -85,30 +85,26 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { - int ret = 0; - __asm__ __volatile__( - "\n1: casa [%4] %%asi, %3, %1\n" + "\n1: casa [%3] %%asi, %2, %0\n" "2:\n" " .section .fixup,#alloc,#execinstr\n" " .align 4\n" "3: sethi %%hi(2b), %0\n" " jmpl %0 + %%lo(2b), %%g0\n" - " mov %5, %0\n" + " mov %4, %0\n" " .previous\n" " .section __ex_table,\"a\"\n" " .align 4\n" " .word 1b, 3b\n" " .previous\n" - : "+r" (ret), "=r" (newval) - : "1" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) + : "=r" (newval) + : "0" (newval), "r" (oldval), "r" (uaddr), "i" (-EFAULT) : "memory"); - *uval = newval; - return ret; + return newval; } #endif /* !(_SPARC64_FUTEX_H) */ diff --git a/trunk/arch/tile/include/asm/futex.h b/trunk/arch/tile/include/asm/futex.h index d03ec124a598..fe0d10dcae57 100644 --- a/trunk/arch/tile/include/asm/futex.h +++ b/trunk/arch/tile/include/asm/futex.h @@ -29,16 +29,16 @@ #include #include -extern struct __get_user futex_set(u32 __user *v, int i); -extern struct __get_user futex_add(u32 __user *v, int n); -extern struct __get_user futex_or(u32 __user *v, int n); -extern struct __get_user futex_andn(u32 __user *v, int n); -extern struct __get_user futex_cmpxchg(u32 __user *v, int o, int n); +extern struct __get_user futex_set(int __user *v, int i); +extern struct __get_user futex_add(int __user *v, int 
n); +extern struct __get_user futex_or(int __user *v, int n); +extern struct __get_user futex_andn(int __user *v, int n); +extern struct __get_user futex_cmpxchg(int __user *v, int o, int n); #ifndef __tilegx__ -extern struct __get_user futex_xor(u32 __user *v, int n); +extern struct __get_user futex_xor(int __user *v, int n); #else -static inline struct __get_user futex_xor(u32 __user *uaddr, int n) +static inline struct __get_user futex_xor(int __user *uaddr, int n) { struct __get_user asm_ret = __get_user_4(uaddr); if (!asm_ret.err) { @@ -53,7 +53,7 @@ static inline struct __get_user futex_xor(u32 __user *uaddr, int n) } #endif -static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -65,7 +65,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); @@ -119,17 +119,16 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, + int newval) { struct __get_user asm_ret; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; asm_ret = futex_cmpxchg(uaddr, oldval, newval); - *uval = asm_ret.val; - return asm_ret.err; + return asm_ret.err ? asm_ret.err : asm_ret.val; } #ifndef __tilegx__ diff --git a/trunk/arch/x86/include/asm/futex.h b/trunk/arch/x86/include/asm/futex.h index d09bb03653f0..1f11ce44e956 100644 --- a/trunk/arch/x86/include/asm/futex.h +++ b/trunk/arch/x86/include/asm/futex.h @@ -37,7 +37,7 @@ "+m" (*uaddr), "=&r" (tem) \ : "r" (oparg), "i" (-EFAULT), "1" (0)) -static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) +static inline int futex_atomic_op_inuser(int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -48,7 +48,7 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) @@ -109,10 +109,9 @@ static inline int futex_atomic_op_inuser(int encoded_op, u32 __user *uaddr) return ret; } -static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +static inline int futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, + int newval) { - int ret = 0; #if defined(CONFIG_X86_32) && !defined(CONFIG_X86_BSWAP) /* Real i386 machines have no cmpxchg instruction */ @@ -120,22 +119,21 @@ static inline int futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, return -ENOSYS; #endif - if (!access_ok(VERIFY_WRITE, uaddr, sizeof(u32))) + if (!access_ok(VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; - asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %4, %2\n" + asm volatile("1:\t" LOCK_PREFIX "cmpxchgl %3, %1\n" "2:\t.section .fixup, \"ax\"\n" - "3:\tmov %3, %0\n" + "3:\tmov %2, %0\n" "\tjmp 2b\n" "\t.previous\n" _ASM_EXTABLE(1b, 
3b) - : "+r" (ret), "=a" (oldval), "+m" (*uaddr) - : "i" (-EFAULT), "r" (newval), "1" (oldval) + : "=a" (oldval), "+m" (*uaddr) + : "i" (-EFAULT), "r" (newval), "0" (oldval) : "memory" ); - *uval = oldval; - return ret; + return oldval; } #endif diff --git a/trunk/include/asm-generic/futex.h b/trunk/include/asm-generic/futex.h index 01f227e14254..3c2344f48136 100644 --- a/trunk/include/asm-generic/futex.h +++ b/trunk/include/asm-generic/futex.h @@ -6,7 +6,7 @@ #include static inline int -futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) +futex_atomic_op_inuser (int encoded_op, int __user *uaddr) { int op = (encoded_op >> 28) & 7; int cmp = (encoded_op >> 24) & 15; @@ -16,7 +16,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) if (encoded_op & (FUTEX_OP_OPARG_SHIFT << 28)) oparg = 1 << oparg; - if (! access_ok (VERIFY_WRITE, uaddr, sizeof(u32))) + if (! access_ok (VERIFY_WRITE, uaddr, sizeof(int))) return -EFAULT; pagefault_disable(); @@ -48,8 +48,7 @@ futex_atomic_op_inuser (int encoded_op, u32 __user *uaddr) } static inline int -futex_atomic_cmpxchg_inatomic(u32 *uval, u32 __user *uaddr, - u32 oldval, u32 newval) +futex_atomic_cmpxchg_inatomic(int __user *uaddr, int oldval, int newval) { return -ENOSYS; } diff --git a/trunk/kernel/futex.c b/trunk/kernel/futex.c index 237f14bfc022..6feeea4f8f15 100644 --- a/trunk/kernel/futex.c +++ b/trunk/kernel/futex.c @@ -381,16 +381,15 @@ static struct futex_q *futex_top_waiter(struct futex_hash_bucket *hb, return NULL; } -static int cmpxchg_futex_value_locked(u32 *curval, u32 __user *uaddr, - u32 uval, u32 newval) +static u32 cmpxchg_futex_value_locked(u32 __user *uaddr, u32 uval, u32 newval) { - int ret; + u32 curval; pagefault_disable(); - ret = futex_atomic_cmpxchg_inatomic(curval, uaddr, uval, newval); + curval = futex_atomic_cmpxchg_inatomic(uaddr, uval, newval); pagefault_enable(); - return ret; + return curval; } static int get_futex_value_locked(u32 *dest, u32 __user *from) @@ -675,7 +674,7 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, struct task_struct *task, int set_waiters) { int lock_taken, ret, ownerdied = 0; - u32 uval, newval, curval, vpid = task_pid_vnr(task); + u32 uval, newval, curval; retry: ret = lock_taken = 0; @@ -685,17 +684,19 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, * (by doing a 0 -> TID atomic cmpxchg), while holding all * the locks. It will most likely not succeed. */ - newval = vpid; + newval = task_pid_vnr(task); if (set_waiters) newval |= FUTEX_WAITERS; - if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, 0, newval))) + curval = cmpxchg_futex_value_locked(uaddr, 0, newval); + + if (unlikely(curval == -EFAULT)) return -EFAULT; /* * Detect deadlocks. 
*/ - if ((unlikely((curval & FUTEX_TID_MASK) == vpid))) + if ((unlikely((curval & FUTEX_TID_MASK) == task_pid_vnr(task)))) return -EDEADLK; /* @@ -722,12 +723,14 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, */ if (unlikely(ownerdied || !(curval & FUTEX_TID_MASK))) { /* Keep the OWNER_DIED bit */ - newval = (curval & ~FUTEX_TID_MASK) | vpid; + newval = (curval & ~FUTEX_TID_MASK) | task_pid_vnr(task); ownerdied = 0; lock_taken = 1; } - if (unlikely(cmpxchg_futex_value_locked(&curval, uaddr, uval, newval))) + curval = cmpxchg_futex_value_locked(uaddr, uval, newval); + + if (unlikely(curval == -EFAULT)) return -EFAULT; if (unlikely(curval != uval)) goto retry; @@ -772,6 +775,24 @@ static int futex_lock_pi_atomic(u32 __user *uaddr, struct futex_hash_bucket *hb, return ret; } +/** + * __unqueue_futex() - Remove the futex_q from its futex_hash_bucket + * @q: The futex_q to unqueue + * + * The q->lock_ptr must not be NULL and must be held by the caller. + */ +static void __unqueue_futex(struct futex_q *q) +{ + struct futex_hash_bucket *hb; + + if (WARN_ON(!q->lock_ptr || !spin_is_locked(q->lock_ptr) + || plist_node_empty(&q->list))) + return; + + hb = container_of(q->lock_ptr, struct futex_hash_bucket, lock); + plist_del(&q->list, &hb->chain); +} + /* * The hash bucket lock must be held when this is called. * Afterwards, the futex_q must not be accessed. @@ -789,7 +810,7 @@ static void wake_futex(struct futex_q *q) */ get_task_struct(p); - plist_del(&q->list, &q->list.plist); + __unqueue_futex(q); /* * The waiting task can free the futex_q as soon as * q->lock_ptr = NULL is written, without taking any locks. A @@ -840,7 +861,9 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_q *this) newval = FUTEX_WAITERS | task_pid_vnr(new_owner); - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) + curval = cmpxchg_futex_value_locked(uaddr, uval, newval); + + if (curval == -EFAULT) ret = -EFAULT; else if (curval != uval) ret = -EINVAL; @@ -875,8 +898,10 @@ static int unlock_futex_pi(u32 __user *uaddr, u32 uval) * There is no waiter, so we unlock the futex. The owner died * bit has not to be preserved here. 
We are the owner: */ - if (cmpxchg_futex_value_locked(&oldval, uaddr, uval, 0)) - return -EFAULT; + oldval = cmpxchg_futex_value_locked(uaddr, uval, 0); + + if (oldval == -EFAULT) + return oldval; if (oldval != uval) return -EAGAIN; @@ -1093,8 +1118,7 @@ void requeue_pi_wake_futex(struct futex_q *q, union futex_key *key, get_futex_key_refs(key); q->key = *key; - WARN_ON(plist_node_empty(&q->list)); - plist_del(&q->list, &q->list.plist); + __unqueue_futex(q); WARN_ON(!q->rt_waiter); q->rt_waiter = NULL; @@ -1497,8 +1521,7 @@ static int unqueue_me(struct futex_q *q) spin_unlock(lock_ptr); goto retry; } - WARN_ON(plist_node_empty(&q->list)); - plist_del(&q->list, &q->list.plist); + __unqueue_futex(q); BUG_ON(q->pi_state); @@ -1518,8 +1541,7 @@ static int unqueue_me(struct futex_q *q) static void unqueue_me_pi(struct futex_q *q) __releases(q->lock_ptr) { - WARN_ON(plist_node_empty(&q->list)); - plist_del(&q->list, &q->list.plist); + __unqueue_futex(q); BUG_ON(!q->pi_state); free_pi_state(q->pi_state); @@ -1571,7 +1593,9 @@ static int fixup_pi_state_owner(u32 __user *uaddr, struct futex_q *q, while (1) { newval = (uval & FUTEX_OWNER_DIED) | newtid; - if (cmpxchg_futex_value_locked(&curval, uaddr, uval, newval)) + curval = cmpxchg_futex_value_locked(uaddr, uval, newval); + + if (curval == -EFAULT) goto handle_fault; if (curval == uval) break; @@ -1772,14 +1796,13 @@ static int futex_wait_setup(u32 __user *uaddr, u32 val, unsigned int flags, * * The basic logical guarantee of a futex is that it blocks ONLY * if cond(var) is known to be true at the time of blocking, for - * any cond. If we locked the hash-bucket after testing *uaddr, that - * would open a race condition where we could block indefinitely with + * any cond. If we queued after testing *uaddr, that would open + * a race condition where we could block indefinitely with * cond(var) false, which would violate the guarantee. * - * On the other hand, we insert q and release the hash-bucket only - * after testing *uaddr. This guarantees that futex_wait() will NOT - * absorb a wakeup if *uaddr does not match the desired values - * while the syscall executes. + * A consequence is that futex_wait() can return zero and absorb + * a wakeup when *uaddr != val on entry to the syscall. This is + * rare, but normal. */ retry: ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &q->key); @@ -2038,9 +2061,9 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) { struct futex_hash_bucket *hb; struct futex_q *this, *next; + u32 uval; struct plist_head *head; union futex_key key = FUTEX_KEY_INIT; - u32 uval, vpid = task_pid_vnr(current); int ret; retry: @@ -2049,7 +2072,7 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) /* * We release only a lock we actually own: */ - if ((uval & FUTEX_TID_MASK) != vpid) + if ((uval & FUTEX_TID_MASK) != task_pid_vnr(current)) return -EPERM; ret = get_futex_key(uaddr, flags & FLAGS_SHARED, &key); @@ -2064,14 +2087,17 @@ static int futex_unlock_pi(u32 __user *uaddr, unsigned int flags) * again. 
If it succeeds then we can return without waking * anyone else up: */ - if (!(uval & FUTEX_OWNER_DIED) && - cmpxchg_futex_value_locked(&uval, uaddr, vpid, 0)) + if (!(uval & FUTEX_OWNER_DIED)) + uval = cmpxchg_futex_value_locked(uaddr, task_pid_vnr(current), 0); + + + if (unlikely(uval == -EFAULT)) goto pi_faulted; /* * Rare case: we managed to release the lock atomically, * no need to wake anyone else up: */ - if (unlikely(uval == vpid)) + if (unlikely(uval == task_pid_vnr(current))) goto out_unlock; /* @@ -2156,7 +2182,7 @@ int handle_early_requeue_pi_wakeup(struct futex_hash_bucket *hb, * We were woken prior to requeue by a timeout or a signal. * Unqueue the futex_q and determine which it was. */ - plist_del(&q->list, &q->list.plist); + plist_del(&q->list, &hb->chain); /* Handle spurious wakeups gracefully */ ret = -EWOULDBLOCK; @@ -2452,7 +2478,9 @@ int handle_futex_death(u32 __user *uaddr, struct task_struct *curr, int pi) * userspace. */ mval = (uval & FUTEX_WAITERS) | FUTEX_OWNER_DIED; - if (futex_atomic_cmpxchg_inatomic(&nval, uaddr, uval, mval)) + nval = futex_atomic_cmpxchg_inatomic(uaddr, uval, mval); + + if (nval == -EFAULT) return -1; if (nval != uval) @@ -2665,7 +2693,8 @@ static int __init futex_init(void) * implementation, the non-functional ones will return * -ENOSYS. */ - if (cmpxchg_futex_value_locked(&curval, NULL, 0, 0) == -EFAULT) + curval = cmpxchg_futex_value_locked(NULL, 0, 0); + if (curval == -EFAULT) futex_cmpxchg_enabled = 1; for (i = 0; i < ARRAY_SIZE(futex_queues); i++) {
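The hunks above all apply the same interface change: futex_atomic_cmpxchg_inatomic() and its wrappers stop passing the previous futex value back through a separate *uval out-parameter and instead return it directly, folding -EFAULT into the return value as an in-band error sentinel that callers such as cmpxchg_futex_value_locked() then test for. The fragment below is a minimal userspace sketch of that calling convention only, not kernel code: it assumes GCC/Clang's __sync_val_compare_and_swap() builtin as a stand-in for the per-architecture cmpxchg assembly, and the names cmpxchg_inatomic_sketch and futex_word are made up for illustration.

#include <errno.h>
#include <stdio.h>

/* Stand-in for the per-arch inline asm: returns the previous value of
 * the word, or -EFAULT when the "user" pointer cannot be accessed
 * (modelled here as a NULL check, where the kernel uses an exception
 * table fixup). */
static int cmpxchg_inatomic_sketch(int *uaddr, int oldval, int newval)
{
	if (uaddr == NULL)
		return -EFAULT;
	return __sync_val_compare_and_swap(uaddr, oldval, newval);
}

int main(void)
{
	int futex_word = 0;	/* 0 means "unowned" */
	int tid = 1234;		/* would be task_pid_vnr(task) in the kernel */

	/* Caller pattern used in kernel/futex.c after this change:
	 * distinguish a fault from losing the 0 -> TID race. */
	int curval = cmpxchg_inatomic_sketch(&futex_word, 0, tid);
	if (curval == -EFAULT)
		return 1;	/* fault while accessing the futex word */
	if (curval != 0)
		return 2;	/* someone else already owns the futex */

	printf("acquired: futex_word=%d\n", futex_word);
	return 0;
}

Note that this convention relies on the futex word never legitimately holding the bit pattern of -EFAULT; the out-parameter form shown on the removed lines keeps the error status and the old value in separate channels and avoids that ambiguity.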