[ARM] barriers: improve xchg, bitops and atomic SMP barriers

Mathieu Desnoyers pointed out that the ARM barriers were lacking:

- cmpxchg, xchg and atomic_add_return need memory barriers on
  architectures that can reorder the order in which memory reads and
  writes become visible between CPUs, which seems to include recent
  ARM architectures. Those barriers are currently missing on ARM.

- The test_and_xxx_bit operations were missing SMP barriers.

So put these barriers in.  Provide separate atomic_add/atomic_sub
operations which do not require barriers.
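
(Illustrative aside, not part of the original commit message: a minimal, hypothetical C sketch of the kind of caller that relies on the rule that value-returning atomics behave as full memory barriers on SMP. All names, values and header paths below are made up for illustration.)

#include <asm/atomic.h>		/* atomic_t, ATOMIC_INIT, atomic_add_return() */
#include <asm/system.h>		/* smp_rmb() */

static int shared_data;
static atomic_t published = ATOMIC_INIT(0);

static void hypothetical_writer(void)
{
	shared_data = 42;		/* plain store */
	/*
	 * atomic_add_return() must act as a full barrier: without the
	 * smp_mb() calls added by this patch, the store to shared_data
	 * could become visible after the counter update on a weakly
	 * ordered SMP ARM system.
	 */
	atomic_add_return(1, &published);
}

static int hypothetical_reader(void)
{
	if (atomic_read(&published) == 0)
		return -1;
	smp_rmb();			/* pairs with the writer's implied barrier */
	return shared_data;
}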

Reported-Reviewed-and-Acked-by: Mathieu Desnoyers <mathieu.desnoyers@polymtl.ca>
Signed-off-by: Russell King <rmk+kernel@arm.linux.org.uk>
Authored and committed by Russell King on May 28, 2009
Parent 2908157, commit bac4e96
Showing 5 changed files with 71 additions and 13 deletions.
13 changes: 13 additions & 0 deletions arch/arm/include/asm/assembler.h
@@ -114,3 +114,16 @@
.align 3; \
.long 9999b,9001f; \
.previous
/*
* SMP data memory barrier
*/
.macro smp_dmb
#ifdef CONFIG_SMP
#if __LINUX_ARM_ARCH__ >= 7
dmb
#elif __LINUX_ARM_ARCH__ == 6
mcr p15, 0, r0, c7, c10, 5 @ dmb
#endif
#endif
.endm
61 changes: 52 additions & 9 deletions arch/arm/include/asm/atomic.h
@@ -44,11 +44,29 @@ static inline void atomic_set(atomic_t *v, int i)
: "cc");
}

static inline void atomic_add(int i, atomic_t *v)
{
unsigned long tmp;
int result;

__asm__ __volatile__("@ atomic_add\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
}

static inline int atomic_add_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;

smp_mb();

__asm__ __volatile__("@ atomic_add_return\n"
"1: ldrex %0, [%2]\n"
" add %0, %0, %3\n"
@@ -59,14 +77,34 @@ static inline int atomic_add_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");

smp_mb();

return result;
}

static inline void atomic_sub(int i, atomic_t *v)
{
unsigned long tmp;
int result;

__asm__ __volatile__("@ atomic_sub\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
" strex %1, %0, [%2]\n"
" teq %1, #0\n"
" bne 1b"
: "=&r" (result), "=&r" (tmp)
: "r" (&v->counter), "Ir" (i)
: "cc");
}

static inline int atomic_sub_return(int i, atomic_t *v)
{
unsigned long tmp;
int result;

smp_mb();

__asm__ __volatile__("@ atomic_sub_return\n"
"1: ldrex %0, [%2]\n"
" sub %0, %0, %3\n"
@@ -77,13 +115,17 @@ static inline int atomic_sub_return(int i, atomic_t *v)
: "r" (&v->counter), "Ir" (i)
: "cc");

smp_mb();

return result;
}

static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
{
unsigned long oldval, res;

smp_mb();

do {
__asm__ __volatile__("@ atomic_cmpxchg\n"
"ldrex %1, [%2]\n"
@@ -95,6 +137,8 @@ static inline int atomic_cmpxchg(atomic_t *ptr, int old, int new)
: "cc");
} while (res);

smp_mb();

return oldval;
}

@@ -135,6 +179,7 @@ static inline int atomic_add_return(int i, atomic_t *v)

return val;
}
#define atomic_add(i, v) (void) atomic_add_return(i, v)

static inline int atomic_sub_return(int i, atomic_t *v)
{
@@ -148,6 +193,7 @@ static inline int atomic_sub_return(int i, atomic_t *v)

return val;
}
#define atomic_sub(i, v) (void) atomic_sub_return(i, v)

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
{
@@ -187,10 +233,8 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)
}
#define atomic_inc_not_zero(v) atomic_add_unless((v), 1, 0)

-#define atomic_add(i, v) (void) atomic_add_return(i, v)
-#define atomic_inc(v) (void) atomic_add_return(1, v)
-#define atomic_sub(i, v) (void) atomic_sub_return(i, v)
-#define atomic_dec(v) (void) atomic_sub_return(1, v)
+#define atomic_inc(v) atomic_add(1, v)
+#define atomic_dec(v) atomic_sub(1, v)

#define atomic_inc_and_test(v) (atomic_add_return(1, v) == 0)
#define atomic_dec_and_test(v) (atomic_sub_return(1, v) == 0)
@@ -200,11 +244,10 @@ static inline int atomic_add_unless(atomic_t *v, int a, int u)

#define atomic_add_negative(i,v) (atomic_add_return(i, v) < 0)

-/* Atomic operations are already serializing on ARM */
-#define smp_mb__before_atomic_dec() barrier()
-#define smp_mb__after_atomic_dec() barrier()
-#define smp_mb__before_atomic_inc() barrier()
-#define smp_mb__after_atomic_inc() barrier()
+#define smp_mb__before_atomic_dec() smp_mb()
+#define smp_mb__after_atomic_dec() smp_mb()
+#define smp_mb__before_atomic_inc() smp_mb()
+#define smp_mb__after_atomic_inc() smp_mb()

#include <asm-generic/atomic.h>
#endif
3 changes: 3 additions & 0 deletions arch/arm/include/asm/system.h
@@ -248,6 +248,8 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
unsigned int tmp;
#endif

smp_mb();

switch (size) {
#if __LINUX_ARM_ARCH__ >= 6
case 1:
@@ -307,6 +309,7 @@ static inline unsigned long __xchg(unsigned long x, volatile void *ptr, int size
__bad_xchg(ptr, size), ret = 0;
break;
}
smp_mb();

return ret;
}
5 changes: 1 addition & 4 deletions arch/arm/kernel/entry-armv.S
@@ -815,10 +815,7 @@ __kuser_helper_start:
*/

__kuser_memory_barrier: @ 0xffff0fa0
-
-#if __LINUX_ARM_ARCH__ >= 6 && defined(CONFIG_SMP)
-mcr p15, 0, r0, c7, c10, 5 @ dmb
-#endif
+smp_dmb
usr_ret lr

.align 5
2 changes: 2 additions & 0 deletions arch/arm/lib/bitops.h
@@ -18,12 +18,14 @@
mov r2, #1
add r1, r1, r0, lsr #3 @ Get byte offset
mov r3, r2, lsl r3 @ create mask
smp_dmb
1: ldrexb r2, [r1]
ands r0, r2, r3 @ save old value of bit
\instr r2, r2, r3 @ toggle bit
strexb ip, r2, [r1]
cmp ip, #0
bne 1b
smp_dmb
cmp r0, #0
movne r0, #1
2: mov pc, lr
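
(Illustrative aside, not part of the commit: the smp_dmb pair added above is what lets value-returning bit operations such as test_and_set_bit() be used for locking. A hypothetical caller follows, with made-up names; header paths are indicative only.)

#include <linux/bitops.h>	/* test_and_set_bit(), clear_bit() */
#include <asm/processor.h>	/* cpu_relax() */

static unsigned long lock_word;
static int protected_count;

static void hypothetical_locked_update(void)
{
	/*
	 * Spin until bit 0 is ours.  test_and_set_bit() returns the old
	 * bit value and must act as a full barrier, so nothing inside
	 * the critical section can be reordered before the bit is set.
	 */
	while (test_and_set_bit(0, &lock_word))
		cpu_relax();

	protected_count++;		/* critical section */

	/*
	 * clear_bit() implies no barrier of its own, so the release
	 * side needs an explicit barrier first.
	 */
	smp_mb__before_clear_bit();
	clear_bit(0, &lock_word);
}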
