ARC: atomic_cmpxchg/atomic_xchg: implement relaxed variants
And move them out of cmpxchg.h to canonical atomic.h

Acked-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Vineet Gupta <vgupta@kernel.org>
Vineet Gupta committed Aug 24, 2021
1 parent ddc348c commit 301014c
Showing 2 changed files with 27 additions and 23 deletions.
27 changes: 27 additions & 0 deletions arch/arc/include/asm/atomic.h
@@ -22,6 +22,33 @@
 #include <asm/atomic-spinlock.h>
 #endif
 
+#define arch_atomic_cmpxchg(v, o, n) \
+({ \
+	arch_cmpxchg(&((v)->counter), (o), (n)); \
+})
+
+#ifdef arch_cmpxchg_relaxed
+#define arch_atomic_cmpxchg_relaxed(v, o, n) \
+({ \
+	arch_cmpxchg_relaxed(&((v)->counter), (o), (n)); \
+})
+#endif
+
+#define arch_atomic_xchg(v, n) \
+({ \
+	arch_xchg(&((v)->counter), (n)); \
+})
+
+#ifdef arch_xchg_relaxed
+#define arch_atomic_xchg_relaxed(v, n) \
+({ \
+	arch_xchg_relaxed(&((v)->counter), (n)); \
+})
+#endif
+
 /*
  * 64-bit atomics
  */
 #ifdef CONFIG_GENERIC_ATOMIC64
 #include <asm-generic/atomic64.h>
 #else
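
The #ifdef guards in the new atomic.h code matter because the generic atomic layer falls back to the fully ordered operation wherever an architecture does not supply a relaxed primitive. A minimal sketch of that fallback pattern, modelled on the kernel's generated atomic fallbacks rather than taken from this commit:

#ifndef arch_atomic_cmpxchg_relaxed
#define arch_atomic_cmpxchg_relaxed	arch_atomic_cmpxchg
#endif

#ifndef arch_atomic_xchg_relaxed
#define arch_atomic_xchg_relaxed	arch_atomic_xchg
#endif

With the guards above, ARC only advertises the relaxed forms on configurations where arch_cmpxchg_relaxed()/arch_xchg_relaxed() actually exist; otherwise the fallback machinery sees only the fully ordered versions and reuses them.
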
23 changes: 0 additions & 23 deletions arch/arc/include/asm/cmpxchg.h
@@ -80,14 +80,6 @@

 #endif
 
-/*
- * atomic_cmpxchg is same as cmpxchg
- *   LLSC: only different in data-type, semantics are exactly same
- * !LLSC: cmpxchg() has to use an external lock atomic_ops_lock to guarantee
- *	 semantics, and this lock also happens to be used by atomic_*()
- */
-#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
-
 /*
  * xchg
  */
@@ -148,19 +140,4 @@

 #endif
 
-/*
- * "atomic" variant of xchg()
- * REQ: It needs to follow the same serialization rules as other atomic_xxx()
- * Since xchg() doesn't always do that, it would seem that following definition
- * is incorrect. But here's the rationale:
- *  SMP : Even xchg() takes the atomic_ops_lock, so OK.
- *  LLSC: atomic_ops_lock are not relevant at all (even if SMP, since LLSC
- *       is natively "SMP safe", no serialization required).
- *  UP  : other atomics disable IRQ, so no way a difft ctxt atomic_xchg()
- *        could clobber them. atomic_xchg() itself would be 1 insn, so it
- *        can't be clobbered by others. Thus no serialization required when
- *        atomic_xchg is involved.
- */
-#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))
-
 #endif
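
As a usage illustration, here is a hedged caller-side sketch (a hypothetical helper, not part of this commit): a statistics counter bumped in a cmpxchg loop needs atomicity but no ordering against surrounding memory accesses, which is exactly where the relaxed variants avoid redundant barriers on weakly ordered hardware.

#include <linux/atomic.h>

/* Hypothetical helper: atomically add @delta to @v with relaxed ordering. */
static inline void stat_add_relaxed(atomic_t *v, int delta)
{
	int old = atomic_read(v);
	int prev;

	for (;;) {
		/* Resolves to arch_atomic_cmpxchg_relaxed() where provided. */
		prev = atomic_cmpxchg_relaxed(v, old, old + delta);
		if (prev == old)
			break;		/* the swap took effect */
		old = prev;		/* lost a race; retry with the fresh value */
	}
}

Here atomic_cmpxchg_relaxed() resolves to arch_atomic_cmpxchg_relaxed() where the architecture defines it, as ARC now does, and to the fully ordered arch_atomic_cmpxchg() otherwise.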
