x86/uaccess: Implement macros for CMPXCHG on user addresses
Add support for CMPXCHG loops on userspace addresses.  Provide both an
"unsafe" version for tight loops that do their own uaccess begin/end, and
a "safe" version for use cases where the CMPXCHG is not buried in a loop,
e.g. KVM will resume the guest instead of looping when emulation of a
guest atomic access fails the CMPXCHG.

Provide 8-byte versions for 32-bit kernels so that KVM can do CMPXCHG on
guest PAE PTEs, which are accessed via userspace addresses.
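
Again purely illustrative (no such helper is added here): a one-shot
CMPXCHG on a guest PAE PTE via the "safe" 8-byte flavor, where any
failure sends control back to the guest instead of looping; the helper
name and the -EAGAIN convention are assumptions:

	/* Hypothetical KVM-style emulation helper: one CMPXCHG attempt on
	 * a guest PAE PTE mapped at a user address.  On 32-bit kernels
	 * this resolves to the cmpxchg8b variant. */
	static int emulate_guest_pte_cmpxchg(u64 __user *ptep, u64 old_pte,
					     u64 new_pte)
	{
		/* 0 = swapped, 1 = stale old value, -EFAULT = fault. */
		int r = __try_cmpxchg_user(ptep, &old_pte, new_pte, pte_err);

		return r ? -EAGAIN : 0;	/* illustrative: caller re-enters guest */
	}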

Guard the asm_volatile_goto() variation with CC_HAS_ASM_GOTO_TIED_OUTPUT,
as the "+m" constraint fails on some compilers that otherwise support
CC_HAS_ASM_GOTO_OUTPUT.

Cc: stable@vger.kernel.org
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Co-developed-by: Sean Christopherson <seanjc@google.com>
Signed-off-by: Sean Christopherson <seanjc@google.com>
Message-Id: <20220202004945.2540433-3-seanjc@google.com>
Signed-off-by: Paolo Bonzini <pbonzini@redhat.com>
Peter Zijlstra authored and Paolo Bonzini committed Apr 13, 2022
1 parent 1aa0e8b commit 989b5db
Showing 1 changed file with 142 additions and 0 deletions: arch/x86/include/asm/uaccess.h
@@ -382,6 +382,103 @@ do { \

#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

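/*
 * CMPXCHG on a user address, with fault handling: the tied-output asm
 * goto flavor jumps straight to @label on a fault, while the fallback
 * routes the fault through a fixup that writes -EFAULT into a scratch
 * register checked after the asm.
 */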
#ifdef CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
bool success; \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm_volatile_goto("\n" \
"1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
_ASM_EXTABLE_UA(1b, %l[label]) \
: CC_OUT(z) (success), \
[ptr] "+m" (*_ptr), \
[old] "+a" (__old) \
: [new] ltype (__new) \
: "memory" \
: label); \
if (unlikely(!success)) \
*_old = __old; \
likely(success); })

#ifdef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
bool success; \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm_volatile_goto("\n" \
"1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
_ASM_EXTABLE_UA(1b, %l[label]) \
: CC_OUT(z) (success), \
"+A" (__old), \
[ptr] "+m" (*_ptr) \
: "b" ((u32)__new), \
"c" ((u32)((u64)__new >> 32)) \
: "memory" \
: label); \
if (unlikely(!success)) \
*_old = __old; \
likely(success); })
#endif // CONFIG_X86_32
#else // !CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT
#define __try_cmpxchg_user_asm(itype, ltype, _ptr, _pold, _new, label) ({ \
int __err = 0; \
bool success; \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm volatile("\n" \
"1: " LOCK_PREFIX "cmpxchg"itype" %[new], %[ptr]\n"\
CC_SET(z) \
"2:\n" \
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, \
%[errout]) \
: CC_OUT(z) (success), \
[errout] "+r" (__err), \
[ptr] "+m" (*_ptr), \
[old] "+a" (__old) \
: [new] ltype (__new) \
: "memory", "cc"); \
if (unlikely(__err)) \
goto label; \
if (unlikely(!success)) \
*_old = __old; \
likely(success); })

#ifdef CONFIG_X86_32
/*
* Unlike the normal CMPXCHG, hardcode ECX for both success/fail and error.
* There are only six GPRs available, and four (EAX, EBX, ECX, and EDX) are
* hardcoded by CMPXCHG8B, leaving only ESI and EDI. If the compiler uses
* both ESI and EDI for the memory operand, compilation will fail if the error
* is an input+output as there will be no register available for input.
*/
#define __try_cmpxchg64_user_asm(_ptr, _pold, _new, label) ({ \
int __result; \
__typeof__(_ptr) _old = (__typeof__(_ptr))(_pold); \
__typeof__(*(_ptr)) __old = *_old; \
__typeof__(*(_ptr)) __new = (_new); \
asm volatile("\n" \
"1: " LOCK_PREFIX "cmpxchg8b %[ptr]\n" \
"mov $0, %%ecx\n\t" \
"setz %%cl\n" \
"2:\n" \
_ASM_EXTABLE_TYPE_REG(1b, 2b, EX_TYPE_EFAULT_REG, %%ecx) \
: [result]"=c" (__result), \
"+A" (__old), \
[ptr] "+m" (*_ptr) \
: "b" ((u32)__new), \
"c" ((u32)((u64)__new >> 32)) \
: "memory", "cc"); \
if (unlikely(__result < 0)) \
goto label; \
if (unlikely(!__result)) \
*_old = __old; \
likely(__result); })
#endif // CONFIG_X86_32
#endif // CONFIG_CC_HAS_ASM_GOTO_TIED_OUTPUT

/* FIXME: this hack is definitely wrong -AK */
struct __large_struct { unsigned long buf[100]; };
#define __m(x) (*(struct __large_struct __user *)(x))
@@ -474,6 +571,51 @@ do { \
} while (0)
#endif // CONFIG_CC_HAS_ASM_GOTO_OUTPUT

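/*
 * Deliberately never defined: referenced only from the unreachable
 * default: case below, so an unsupported operand size becomes a
 * link-time error instead of a silent runtime bug.
 */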
extern void __try_cmpxchg_user_wrong_size(void);

#ifndef CONFIG_X86_32
#define __try_cmpxchg64_user_asm(_ptr, _oldp, _nval, _label) \
__try_cmpxchg_user_asm("q", "r", (_ptr), (_oldp), (_nval), _label)
#endif

/*
* Force the pointer to u<size> to match the size expected by the asm helper.
* clang/LLVM compiles all cases and only discards the unused paths after
* processing errors, which breaks i386 if the pointer is an 8-byte value.
*/
#define unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \
bool __ret; \
__chk_user_ptr(_ptr); \
switch (sizeof(*(_ptr))) { \
case 1: __ret = __try_cmpxchg_user_asm("b", "q", \
(__force u8 *)(_ptr), (_oldp), \
(_nval), _label); \
break; \
case 2: __ret = __try_cmpxchg_user_asm("w", "r", \
(__force u16 *)(_ptr), (_oldp), \
(_nval), _label); \
break; \
case 4: __ret = __try_cmpxchg_user_asm("l", "r", \
(__force u32 *)(_ptr), (_oldp), \
(_nval), _label); \
break; \
case 8: __ret = __try_cmpxchg64_user_asm((__force u64 *)(_ptr), (_oldp),\
(_nval), _label); \
break; \
default: __try_cmpxchg_user_wrong_size(); \
} \
__ret; })

/* "Returns" 0 on success, 1 on failure, -EFAULT if the access faults. */
#define __try_cmpxchg_user(_ptr, _oldp, _nval, _label) ({ \
int __ret = -EFAULT; \
__uaccess_begin_nospec(); \
__ret = !unsafe_try_cmpxchg_user(_ptr, _oldp, _nval, _label); \
_label: \
__uaccess_end(); \
__ret; \
})

/*
* We want the unsafe accessors to always be inlined and use
* the error labels - thus the macro games.