locking/atomic: parisc: move to ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates parisc to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
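
For illustration, the "wrap" step lives in the generated include/linux/atomic-instrumented.h; a simplified sketch of what such a wrapper looks like (not part of this patch, details per the v5.13-era generated header):

static __always_inline void
atomic_set(atomic_t *v, int i)
{
	instrument_atomic_write(v, sizeof(*v));
	arch_atomic_set(v, i);
}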

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: "James E.J. Bottomley" <James.Bottomley@HansenPartnership.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Helge Deller <deller@gmx.de>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-27-mark.rutland@arm.com
Mark Rutland authored and Peter Zijlstra committed May 26, 2021
1 parent 3f1e931 commit 329c161
Showing 3 changed files with 24 additions and 23 deletions.
1 change: 1 addition & 0 deletions arch/parisc/Kconfig
@@ -2,6 +2,7 @@
 config PARISC
 	def_bool y
 	select ARCH_32BIT_OFF_T if !64BIT
+	select ARCH_ATOMIC
 	select ARCH_MIGHT_HAVE_PC_PARPORT
 	select HAVE_IDE
 	select HAVE_FUNCTION_TRACER
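Selecting ARCH_ATOMIC makes include/linux/atomic.h route this architecture through the arch_*() fallbacks plus the instrumented wrappers; schematically, the transition-era logic looked roughly like this (a sketch, header names as of v5.13):

#ifdef CONFIG_ARCH_ATOMIC
#include <linux/atomic-arch-fallback.h>
#include <asm-generic/atomic-instrumented.h>
#else
#include <linux/atomic-fallback.h>
#endif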
34 changes: 17 additions & 17 deletions arch/parisc/include/asm/atomic.h
@@ -56,7 +56,7 @@ extern arch_spinlock_t __atomic_hash[ATOMIC_HASH_SIZE] __lock_aligned;
  * are atomic, so a reader never sees inconsistent values.
  */
 
-static __inline__ void atomic_set(atomic_t *v, int i)
+static __inline__ void arch_atomic_set(atomic_t *v, int i)
 {
 	unsigned long flags;
 	_atomic_spin_lock_irqsave(v, flags);
@@ -66,19 +66,19 @@ static __inline__ void atomic_set(atomic_t *v, int i)
 	_atomic_spin_unlock_irqrestore(v, flags);
 }
 
-#define atomic_set_release(v, i)	atomic_set((v), (i))
+#define arch_atomic_set_release(v, i)	arch_atomic_set((v), (i))
 
-static __inline__ int atomic_read(const atomic_t *v)
+static __inline__ int arch_atomic_read(const atomic_t *v)
 {
 	return READ_ONCE((v)->counter);
 }
 
 /* exported interface */
-#define atomic_cmpxchg(v, o, n)	(cmpxchg(&((v)->counter), (o), (n)))
-#define atomic_xchg(v, new)	(xchg(&((v)->counter), new))
+#define arch_atomic_cmpxchg(v, o, n)	(arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic_xchg(v, new)	(arch_xchg(&((v)->counter), new))
 
 #define ATOMIC_OP(op, c_op) \
-static __inline__ void atomic_##op(int i, atomic_t *v) \
+static __inline__ void arch_atomic_##op(int i, atomic_t *v) \
 { \
 	unsigned long flags; \
 	\
@@ -88,7 +88,7 @@ static __inline__ void atomic_##op(int i, atomic_t *v) \
 }
 
 #define ATOMIC_OP_RETURN(op, c_op) \
-static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
+static __inline__ int arch_atomic_##op##_return(int i, atomic_t *v) \
 { \
 	unsigned long flags; \
 	int ret; \
@@ -101,7 +101,7 @@ static __inline__ int atomic_##op##_return(int i, atomic_t *v) \
 }
 
 #define ATOMIC_FETCH_OP(op, c_op) \
-static __inline__ int atomic_fetch_##op(int i, atomic_t *v) \
+static __inline__ int arch_atomic_fetch_##op(int i, atomic_t *v) \
 { \
 	unsigned long flags; \
 	int ret; \
@@ -141,7 +141,7 @@ ATOMIC_OPS(xor, ^=)
 #define ATOMIC64_INIT(i) { (i) }
 
 #define ATOMIC64_OP(op, c_op) \
-static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
+static __inline__ void arch_atomic64_##op(s64 i, atomic64_t *v) \
 { \
 	unsigned long flags; \
 	\
@@ -151,7 +151,7 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t *v) \
 }
 
 #define ATOMIC64_OP_RETURN(op, c_op) \
-static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
+static __inline__ s64 arch_atomic64_##op##_return(s64 i, atomic64_t *v) \
 { \
 	unsigned long flags; \
 	s64 ret; \
@@ -164,7 +164,7 @@ static __inline__ s64 atomic64_##op##_return(s64 i, atomic64_t *v) \
 }
 
 #define ATOMIC64_FETCH_OP(op, c_op) \
-static __inline__ s64 atomic64_fetch_##op(s64 i, atomic64_t *v) \
+static __inline__ s64 arch_atomic64_fetch_##op(s64 i, atomic64_t *v) \
 { \
 	unsigned long flags; \
 	s64 ret; \
@@ -200,7 +200,7 @@ ATOMIC64_OPS(xor, ^=)
 #undef ATOMIC64_OP
 
 static __inline__ void
-atomic64_set(atomic64_t *v, s64 i)
+arch_atomic64_set(atomic64_t *v, s64 i)
 {
 	unsigned long flags;
 	_atomic_spin_lock_irqsave(v, flags);
@@ -210,18 +210,18 @@ atomic64_set(atomic64_t *v, s64 i)
 	_atomic_spin_unlock_irqrestore(v, flags);
 }
 
-#define atomic64_set_release(v, i)	atomic64_set((v), (i))
+#define arch_atomic64_set_release(v, i)	arch_atomic64_set((v), (i))
 
 static __inline__ s64
-atomic64_read(const atomic64_t *v)
+arch_atomic64_read(const atomic64_t *v)
 {
 	return READ_ONCE((v)->counter);
 }
 
 /* exported interface */
-#define atomic64_cmpxchg(v, o, n) \
-	((__typeof__((v)->counter))cmpxchg(&((v)->counter), (o), (n)))
-#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
+#define arch_atomic64_cmpxchg(v, o, n) \
+	((__typeof__((v)->counter))arch_cmpxchg(&((v)->counter), (o), (n)))
+#define arch_atomic64_xchg(v, new) (arch_xchg(&((v)->counter), new))
 
 #endif /* !CONFIG_64BIT */
 
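For context on the locking seen above: parisc has no general fetch-and-op instruction, so these atomics are serialized through a small hash of spinlocks keyed on the atomic's address. A trimmed sketch of the surrounding helpers in arch/parisc/include/asm/atomic.h, which this patch leaves unchanged:

/* One lock per cacheline-sized bucket; "a" is usually an address. */
#define ATOMIC_HASH_SIZE	4
#define ATOMIC_HASH(a)	(&(__atomic_hash[(((unsigned long) (a))/L1_CACHE_BYTES) & (ATOMIC_HASH_SIZE-1)]))

#define _atomic_spin_lock_irqsave(l,f) do {	\
	arch_spinlock_t *s = ATOMIC_HASH(l);	\
	local_irq_save(f);			\
	arch_spin_lock(s);			\
} while(0)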
12 changes: 6 additions & 6 deletions arch/parisc/include/asm/cmpxchg.h
@@ -44,7 +44,7 @@ __xchg(unsigned long x, volatile void *ptr, int size)
 **	if (((unsigned long)p & 0xf) == 0)
 **		return __ldcw(p);
 */
-#define xchg(ptr, x) \
+#define arch_xchg(ptr, x) \
 ({ \
 	__typeof__(*(ptr)) __ret; \
 	__typeof__(*(ptr)) _x_ = (x); \
@@ -78,7 +78,7 @@ __cmpxchg(volatile void *ptr, unsigned long old, unsigned long new_, int size)
 	return old;
 }
 
-#define cmpxchg(ptr, o, n) \
+#define arch_cmpxchg(ptr, o, n) \
 ({ \
 	__typeof__(*(ptr)) _o_ = (o); \
 	__typeof__(*(ptr)) _n_ = (n); \
@@ -106,19 +106,19 @@ static inline unsigned long __cmpxchg_local(volatile void *ptr,
  * cmpxchg_local and cmpxchg64_local are atomic wrt current CPU. Always make
  * them available.
  */
-#define cmpxchg_local(ptr, o, n) \
+#define arch_cmpxchg_local(ptr, o, n) \
 	((__typeof__(*(ptr)))__cmpxchg_local((ptr), (unsigned long)(o), \
 			(unsigned long)(n), sizeof(*(ptr))))
 #ifdef CONFIG_64BIT
-#define cmpxchg64_local(ptr, o, n) \
+#define arch_cmpxchg64_local(ptr, o, n) \
 ({ \
 	BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
 	cmpxchg_local((ptr), (o), (n)); \
 })
 #else
-#define cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
+#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
 #endif
 
-#define cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
+#define arch_cmpxchg64(ptr, o, n) __cmpxchg_u64(ptr, o, n)
 
 #endif /* _ASM_PARISC_CMPXCHG_H_ */
