Merge branch 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip

* 'x86-asm-for-linus' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/linux-2.6-tip:
  include/linux/compiler-gcc4.h: Fix build bug - gcc-4.0.2 doesn't understand __builtin_object_size
  x86/alternatives: No need for alternatives-asm.h to re-invent stuff already in asm.h
  x86/alternatives: Check replacementlen <= instrlen at build time
  x86, 64-bit: Set data segments to null after switching to 64-bit mode
  x86: Clean up the loadsegment() macro
  x86: Optimize loadsegment()
  x86: Add missing might_fault() checks to copy_{to,from}_user()
  x86-64: __copy_from_user_inatomic() adjustments
  x86: Remove unused thread_return label from switch_to()
  x86, 64-bit: Fix bstep_iret jump
  x86: Don't use the strict copy checks when branch profiling is in use
  x86, 64-bit: Move K8 B step iret fixup to fault entry asm
  x86: Generate cmpxchg build failures
  x86: Add a Kconfig option to turn the copy_from_user warnings into errors
  x86: Turn the copy_from_user check into an (optional) compile time warning
  x86: Use __builtin_memset and __builtin_memcpy for memset/memcpy
  x86: Use __builtin_object_size() to validate the buffer size for copy_from_user()
Linus Torvalds committed Dec 5, 2009
2 parents a77d2e0 + 7cff7ce commit ef26b16
Showing 18 changed files with 321 additions and 354 deletions.
14 changes: 14 additions & 0 deletions arch/x86/Kconfig.debug
@@ -296,4 +296,18 @@ config OPTIMIZE_INLINING

If unsure, say N.

config DEBUG_STRICT_USER_COPY_CHECKS
bool "Strict copy size checks"
depends on DEBUG_KERNEL && !TRACE_BRANCH_PROFILING
---help---
Enabling this option turns a certain set of sanity checks for user
copy operations into compile time failures.

The copy_from_user() etc checks are there to help test if there
are sufficient security checks on the length argument of
the copy operation, by having gcc prove that the argument is
within bounds.

If unsure, or if you run an older (pre 4.4) gcc, say N.

endmenu
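
The DEBUG_STRICT_USER_COPY_CHECKS help text above only hints at the mechanism: gcc's __builtin_object_size() yields the compile-time size of the destination buffer, and when the requested copy length is provably larger, a call to a specially attributed helper is left in the generated code, which makes gcc emit a warning (or, with this option, an error). Below is a minimal sketch of that idea outside the kernel — every name in it is made up for illustration, and the real x86 uaccess code differs in detail:

/* Illustration only: none of these names are the kernel's. */
extern unsigned long example_raw_copy(void *to, const void *from, unsigned long n);

extern void example_copy_overflow(void)
        __attribute__((warning("copy size exceeds the destination buffer")));

static inline unsigned long
example_copy_from_user(void *to, const void *from, unsigned long n)
{
        unsigned long dst_size = __builtin_object_size(to, 0); /* (unsigned long)-1 if unknown */

        /*
         * With gcc >= 4.4 and optimization enabled, both operands are often
         * compile-time constants here; if the copy is provably too large,
         * the call below survives dead-code elimination and gcc prints the
         * attached warning at build time.
         */
        if (__builtin_constant_p(n) && dst_size != (unsigned long)-1 && n > dst_size)
                example_copy_overflow();

        return example_raw_copy(to, from, n);
}

Copying, say, 64 bytes into a 16-byte on-stack array through such a wrapper is then reported when the kernel is built rather than when the code runs.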
10 changes: 3 additions & 7 deletions arch/x86/include/asm/alternative-asm.h
@@ -1,17 +1,13 @@
#ifdef __ASSEMBLY__

#ifdef CONFIG_X86_32
# define X86_ALIGN .long
#else
# define X86_ALIGN .quad
#endif
#include <asm/asm.h>

#ifdef CONFIG_SMP
.macro LOCK_PREFIX
1: lock
.section .smp_locks,"a"
.align 4
X86_ALIGN 1b
_ASM_ALIGN
_ASM_PTR 1b
.previous
.endm
#else
1 change: 1 addition & 0 deletions arch/x86/include/asm/alternative.h
@@ -84,6 +84,7 @@ static inline void alternatives_smp_switch(int smp) {}
" .byte " __stringify(feature) "\n" /* feature bit */ \
" .byte 662b-661b\n" /* sourcelen */ \
" .byte 664f-663f\n" /* replacementlen */ \
" .byte 0xff + (664f-663f) - (662b-661b)\n" /* rlen <= slen */ \
".previous\n" \
".section .altinstr_replacement, \"ax\"\n" \
"663:\n\t" newinstr "\n664:\n" /* replacement */ \
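
The one added line is a build-time assertion. The assembler accepts .byte operands only in the range 0..255, so 0xff + (replacementlen) - (instrlen) is representable exactly when the replacement is no longer than the original instruction sequence; an oversized alternative therefore stops the build in the assembler. A standalone illustration of the same trick follows — not kernel code, and the section name is invented:

/*
 * File-scope GNU inline asm.  This assembles because the "replacement"
 * (2 bytes) is not longer than the "original" (3 bytes); swap the two
 * lengths and the .byte operand becomes 0x100, which the assembler
 * rejects as out of range.
 */
asm(".pushsection .discard.demo, \"a\"\n"
    "661: .byte 0x90, 0x90, 0x90\n662:\n"          /* "original": 3 bytes */
    "663: .byte 0x90, 0x90\n664:\n"                /* "replacement": 2 bytes */
    ".byte 0xff + (664b-663b) - (662b-661b)\n"     /* fails if replacement > original */
    ".popsection\n");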
218 changes: 86 additions & 132 deletions arch/x86/include/asm/cmpxchg_32.h
@@ -8,14 +8,50 @@
* you need to test for the feature in boot_cpu_data.
*/

#define xchg(ptr, v) \
((__typeof__(*(ptr)))__xchg((unsigned long)(v), (ptr), sizeof(*(ptr))))
extern void __xchg_wrong_size(void);

/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
* but generally the primitive is invalid, *ptr is output argument. --ANK
*/

struct __xchg_dummy {
unsigned long a[100];
};
#define __xg(x) ((struct __xchg_dummy *)(x))

#define __xchg(x, ptr, size) \
({ \
__typeof(*(ptr)) __x = (x); \
switch (size) { \
case 1: \
asm volatile("xchgb %b0,%1" \
: "=q" (__x) \
: "m" (*__xg(ptr)), "0" (__x) \
: "memory"); \
break; \
case 2: \
asm volatile("xchgw %w0,%1" \
: "=r" (__x) \
: "m" (*__xg(ptr)), "0" (__x) \
: "memory"); \
break; \
case 4: \
asm volatile("xchgl %0,%1" \
: "=r" (__x) \
: "m" (*__xg(ptr)), "0" (__x) \
: "memory"); \
break; \
default: \
__xchg_wrong_size(); \
} \
__x; \
})

#define xchg(ptr, v) \
__xchg((v), (ptr), sizeof(*ptr))

/*
* The semantics of XCHGCMP8B are a bit strange, this is why
* there is a loop and the loading of %%eax and %%edx has to
@@ -71,57 +107,63 @@ static inline void __set_64bit_var(unsigned long long *ptr,
(unsigned int)((value) >> 32)) \
: __set_64bit(ptr, ll_low((value)), ll_high((value))))

/*
* Note: no "lock" prefix even on SMP: xchg always implies lock anyway
* Note 2: xchg has side effect, so that attribute volatile is necessary,
* but generally the primitive is invalid, *ptr is output argument. --ANK
*/
static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
int size)
{
switch (size) {
case 1:
asm volatile("xchgb %b0,%1"
: "=q" (x)
: "m" (*__xg(ptr)), "0" (x)
: "memory");
break;
case 2:
asm volatile("xchgw %w0,%1"
: "=r" (x)
: "m" (*__xg(ptr)), "0" (x)
: "memory");
break;
case 4:
asm volatile("xchgl %0,%1"
: "=r" (x)
: "m" (*__xg(ptr)), "0" (x)
: "memory");
break;
}
return x;
}
extern void __cmpxchg_wrong_size(void);

/*
* Atomic compare and exchange. Compare OLD with MEM, if identical,
* store NEW in MEM. Return the initial value in MEM. Success is
* indicated by comparing RETURN with OLD.
*/
#define __raw_cmpxchg(ptr, old, new, size, lock) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) __old = (old); \
__typeof__(*(ptr)) __new = (new); \
switch (size) { \
case 1: \
asm volatile(lock "cmpxchgb %b1,%2" \
: "=a"(__ret) \
: "q"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
case 2: \
asm volatile(lock "cmpxchgw %w1,%2" \
: "=a"(__ret) \
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
case 4: \
asm volatile(lock "cmpxchgl %1,%2" \
: "=a"(__ret) \
: "r"(__new), "m"(*__xg(ptr)), "0"(__old) \
: "memory"); \
break; \
default: \
__cmpxchg_wrong_size(); \
} \
__ret; \
})

#define __cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), LOCK_PREFIX)

#define __sync_cmpxchg(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "lock; ")

#define __cmpxchg_local(ptr, old, new, size) \
__raw_cmpxchg((ptr), (old), (new), (size), "")

#ifdef CONFIG_X86_CMPXCHG
#define __HAVE_ARCH_CMPXCHG 1
#define cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define sync_cmpxchg(ptr, o, n) \
((__typeof__(*(ptr)))__sync_cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))
#define cmpxchg_local(ptr, o, n) \
((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
(unsigned long)(n), \
sizeof(*(ptr))))

#define cmpxchg(ptr, old, new) \
__cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define sync_cmpxchg(ptr, old, new) \
__sync_cmpxchg((ptr), (old), (new), sizeof(*ptr))

#define cmpxchg_local(ptr, old, new) \
__cmpxchg_local((ptr), (old), (new), sizeof(*ptr))
#endif

#ifdef CONFIG_X86_CMPXCHG64
@@ -133,94 +175,6 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr,
(unsigned long long)(n)))
#endif

static inline unsigned long __cmpxchg(volatile void *ptr, unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
asm volatile(LOCK_PREFIX "cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
asm volatile(LOCK_PREFIX "cmpxchgw %w1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
asm volatile(LOCK_PREFIX "cmpxchgl %1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
return old;
}

/*
* Always use locked operations when touching memory shared with a
* hypervisor, since the system may be SMP even if the guest kernel
* isn't.
*/
static inline unsigned long __sync_cmpxchg(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
asm volatile("lock; cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
asm volatile("lock; cmpxchgw %w1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
asm volatile("lock; cmpxchgl %1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
return old;
}

static inline unsigned long __cmpxchg_local(volatile void *ptr,
unsigned long old,
unsigned long new, int size)
{
unsigned long prev;
switch (size) {
case 1:
asm volatile("cmpxchgb %b1,%2"
: "=a"(prev)
: "q"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 2:
asm volatile("cmpxchgw %w1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
case 4:
asm volatile("cmpxchgl %1,%2"
: "=a"(prev)
: "r"(new), "m"(*__xg(ptr)), "0"(old)
: "memory");
return prev;
}
return old;
}

static inline unsigned long long __cmpxchg64(volatile void *ptr,
unsigned long long old,
unsigned long long new)
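
The cmpxchg_32.h rewrite above replaces the inline __xchg()/__cmpxchg() helper functions — whose switch statements silently fell through and returned the old value for an unsupported operand size — with macros whose default case calls a deliberately undefined extern function. Every supported size is resolved from sizeof(*(ptr)) at compile time and the dead branches are discarded, so only a wrong-sized operand leaves the call in place and the build fails at link time with an undefined reference instead of generating wrong code. A simplified sketch of the idiom, trimmed to the 4-byte case (the names are illustrative, not the kernel's macros):

extern void example_xchg_wrong_size(void);      /* deliberately never defined */

#define example_xchg(ptr, v)                                            \
({                                                                      \
        __typeof__(*(ptr)) __x = (v);                                   \
        switch (sizeof(*(ptr))) {                                       \
        case 4:                                                         \
                asm volatile("xchgl %0,%1"                              \
                             : "+r" (__x), "+m" (*(ptr))                \
                             : : "memory");                             \
                break;                                                  \
        default:                                                        \
                example_xchg_wrong_size();  /* survives only here */    \
                break;                                                  \
        }                                                               \
        __x;                                                            \
})

Used on an int, this expands to a single xchgl; used on a 64-bit quantity in a 32-bit build, gcc keeps the default branch and the link stops with an undefined reference to example_xchg_wrong_size. The __raw_cmpxchg() macro in the new code applies the same pattern through __cmpxchg_wrong_size(), and additionally takes the lock prefix as a parameter so that cmpxchg(), sync_cmpxchg() and cmpxchg_local() can share one body: compare old with *ptr and store new only if they match, always returning what was in memory.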