
x86: Use xadd helper more widely
This converts the trivial cases of open-coded xadd to the xadd() macro.

Signed-off-by: Jeremy Fitzhardinge <jeremy.fitzhardinge@citrix.com>
Link: http://lkml.kernel.org/r/4E5BCC40.3030501@goop.org
Signed-off-by: H. Peter Anvin <hpa@linux.intel.com>
Jeremy Fitzhardinge authored and H. Peter Anvin committed Aug 29, 2011
Parent: 433b352; commit: 8b8bc2f
Showing 4 changed files with 5 additions and 23 deletions.
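
For reference, xadd() is the helper macro that the parent commit (433b352) introduced in the x86 cmpxchg headers: it atomically adds a value to *ptr with a locked xadd instruction and returns the value *ptr held before the addition, selecting the operand width from sizeof(*ptr). The sketch below is a simplified illustration of that idea, not the kernel's exact macro; the name xadd_sketch and the trimmed case list are ours.

/*
 * Illustrative sketch of an xadd()-style helper (simplified, not the
 * kernel's exact macro). Atomically adds "inc" to *ptr and evaluates
 * to the value *ptr held before the addition.
 */
#define xadd_sketch(ptr, inc)						\
({									\
	__typeof__(*(ptr)) __ret = (inc);				\
	switch (sizeof(*(ptr))) {					\
	case 2:								\
		asm volatile("lock; xaddw %w0, %1"			\
			     : "+r" (__ret), "+m" (*(ptr))		\
			     : : "memory", "cc");			\
		break;							\
	case 4:								\
		asm volatile("lock; xaddl %0, %1"			\
			     : "+r" (__ret), "+m" (*(ptr))		\
			     : : "memory", "cc");			\
		break;							\
	case 8:								\
		asm volatile("lock; xaddq %q0, %1"			\
			     : "+r" (__ret), "+m" (*(ptr))		\
			     : : "memory", "cc");			\
		break;							\
	}								\
	__ret;	/* value *ptr held before the add */			\
})

Because the helper returns the old value, each converted site below computes the post-add result as, e.g., i + xadd(&v->counter, i), which is exactly what the removed open-coded sequences computed by saving the addend in a temporary and adding it to the value the xadd instruction returned.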
arch/x86/include/asm/atomic.h (2 additions, 6 deletions)
@@ -172,18 +172,14 @@ static inline int atomic_add_negative(int i, atomic_t *v)
  */
 static inline int atomic_add_return(int i, atomic_t *v)
 {
-	int __i;
 #ifdef CONFIG_M386
+	int __i;
 	unsigned long flags;
 	if (unlikely(boot_cpu_data.x86 <= 3))
 		goto no_xadd;
 #endif
 	/* Modern 486+ processor */
-	__i = i;
-	asm volatile(LOCK_PREFIX "xaddl %0, %1"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 
 #ifdef CONFIG_M386
 no_xadd: /* Legacy 386 processor */
arch/x86/include/asm/atomic64_64.h (1 addition, 5 deletions)
@@ -170,11 +170,7 @@ static inline int atomic64_add_negative(long i, atomic64_t *v)
  */
 static inline long atomic64_add_return(long i, atomic64_t *v)
 {
-	long __i = i;
-	asm volatile(LOCK_PREFIX "xaddq %0, %1;"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 }
 
 static inline long atomic64_sub_return(long i, atomic64_t *v)
arch/x86/include/asm/rwsem.h (1 addition, 7 deletions)
@@ -204,13 +204,7 @@ static inline void rwsem_atomic_add(long delta, struct rw_semaphore *sem)
  */
 static inline long rwsem_atomic_update(long delta, struct rw_semaphore *sem)
 {
-	long tmp = delta;
-
-	asm volatile(LOCK_PREFIX "xadd %0,%1"
-		     : "+r" (tmp), "+m" (sem->count)
-		     : : "memory");
-
-	return tmp + delta;
+	return delta + xadd(&sem->count, delta);
 }
 
 #endif /* __KERNEL__ */
arch/x86/include/asm/uv/uv_bau.h (1 addition, 5 deletions)
@@ -656,11 +656,7 @@ static inline int atomic_read_short(const struct atomic_short *v)
  */
 static inline int atom_asr(short i, struct atomic_short *v)
 {
-	short __i = i;
-	asm volatile(LOCK_PREFIX "xaddw %0, %1"
-		     : "+r" (i), "+m" (v->counter)
-		     : : "memory");
-	return i + __i;
+	return i + xadd(&v->counter, i);
 }
 
 /*
