locking/atomic: alpha: move to ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates alpha to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.

Note: xchg_local() is NOT currently part of the generic atomic
arch_atomic API, and is not instrumented.
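
For context (illustration only, not part of this patch): once an architecture
selects ARCH_ATOMIC, the generic instrumented wrappers (generated into
include/asm-generic/atomic-instrumented.h by
scripts/atomic/gen-atomic-instrumented.sh) provide the regular atomic_*()
entry points on top of the arch_*() implementations, roughly along these
lines:

/*
 * Simplified sketch of the instrumented wrappers that sit on top of the
 * arch_*() ops -- not taken verbatim from the generated header.
 */
static __always_inline int
atomic_read(const atomic_t *v)
{
	/* Report the access to KASAN/KCSAN, then defer to the arch op. */
	instrument_atomic_read(v, sizeof(*v));
	return arch_atomic_read(v);
}

static __always_inline void
atomic_add(int i, atomic_t *v)
{
	/* RMW ops are instrumented as an atomic read-write access. */
	instrument_atomic_read_write(v, sizeof(*v));
	arch_atomic_add(i, v);
}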

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Ivan Kokshaysky <ink@jurassic.park.msu.ru>
Cc: Matt Turner <mattst88@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Richard Henderson <rth@twiddle.net>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-14-mark.rutland@arm.com
Mark Rutland authored and Peter Zijlstra committed May 26, 2021
1 parent 82b993e commit 96d330a
Showing 3 changed files with 54 additions and 47 deletions.
1 change: 1 addition & 0 deletions arch/alpha/Kconfig
@@ -2,6 +2,7 @@
config ALPHA
bool
default y
select ARCH_ATOMIC
select ARCH_32BIT_USTAT_F_TINODE
select ARCH_MIGHT_HAVE_PC_PARPORT
select ARCH_MIGHT_HAVE_PC_SERIO
88 changes: 47 additions & 41 deletions arch/alpha/include/asm/atomic.h
@@ -26,11 +26,11 @@

#define ATOMIC64_INIT(i) { (i) }

#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic64_read(v) READ_ONCE((v)->counter)
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic64_read(v) READ_ONCE((v)->counter)

#define atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#define atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))
#define arch_atomic_set(v,i) WRITE_ONCE((v)->counter, (i))
#define arch_atomic64_set(v,i) WRITE_ONCE((v)->counter, (i))

/*
* To get proper branch prediction for the main line, we must branch
@@ -39,7 +39,7 @@
*/

#define ATOMIC_OP(op, asm_op) \
static __inline__ void atomic_##op(int i, atomic_t * v) \
static __inline__ void arch_atomic_##op(int i, atomic_t * v) \
{ \
unsigned long temp; \
__asm__ __volatile__( \
@@ -55,7 +55,7 @@ static __inline__ void atomic_##op(int i, atomic_t * v) \
} \

#define ATOMIC_OP_RETURN(op, asm_op) \
static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
static inline int arch_atomic_##op##_return_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -74,7 +74,7 @@ static inline int atomic_##op##_return_relaxed(int i, atomic_t *v) \
}

#define ATOMIC_FETCH_OP(op, asm_op) \
static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
static inline int arch_atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
{ \
long temp, result; \
__asm__ __volatile__( \
@@ -92,7 +92,7 @@ static inline int atomic_fetch_##op##_relaxed(int i, atomic_t *v) \
}

#define ATOMIC64_OP(op, asm_op) \
static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
static __inline__ void arch_atomic64_##op(s64 i, atomic64_t * v) \
{ \
s64 temp; \
__asm__ __volatile__( \
@@ -108,7 +108,8 @@ static __inline__ void atomic64_##op(s64 i, atomic64_t * v) \
} \

#define ATOMIC64_OP_RETURN(op, asm_op) \
static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
static __inline__ s64 \
arch_atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -127,7 +128,8 @@ static __inline__ s64 atomic64_##op##_return_relaxed(s64 i, atomic64_t * v) \
}

#define ATOMIC64_FETCH_OP(op, asm_op) \
static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
static __inline__ s64 \
arch_atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
{ \
s64 temp, result; \
__asm__ __volatile__( \
@@ -155,18 +157,18 @@ static __inline__ s64 atomic64_fetch_##op##_relaxed(s64 i, atomic64_t * v) \
ATOMIC_OPS(add)
ATOMIC_OPS(sub)

#define atomic_add_return_relaxed atomic_add_return_relaxed
#define atomic_sub_return_relaxed atomic_sub_return_relaxed
#define atomic_fetch_add_relaxed atomic_fetch_add_relaxed
#define atomic_fetch_sub_relaxed atomic_fetch_sub_relaxed
#define arch_atomic_add_return_relaxed arch_atomic_add_return_relaxed
#define arch_atomic_sub_return_relaxed arch_atomic_sub_return_relaxed
#define arch_atomic_fetch_add_relaxed arch_atomic_fetch_add_relaxed
#define arch_atomic_fetch_sub_relaxed arch_atomic_fetch_sub_relaxed

#define atomic64_add_return_relaxed atomic64_add_return_relaxed
#define atomic64_sub_return_relaxed atomic64_sub_return_relaxed
#define atomic64_fetch_add_relaxed atomic64_fetch_add_relaxed
#define atomic64_fetch_sub_relaxed atomic64_fetch_sub_relaxed
#define arch_atomic64_add_return_relaxed arch_atomic64_add_return_relaxed
#define arch_atomic64_sub_return_relaxed arch_atomic64_sub_return_relaxed
#define arch_atomic64_fetch_add_relaxed arch_atomic64_fetch_add_relaxed
#define arch_atomic64_fetch_sub_relaxed arch_atomic64_fetch_sub_relaxed

#define atomic_andnot atomic_andnot
#define atomic64_andnot atomic64_andnot
#define arch_atomic_andnot arch_atomic_andnot
#define arch_atomic64_andnot arch_atomic64_andnot

#undef ATOMIC_OPS
#define ATOMIC_OPS(op, asm) \
@@ -180,15 +182,15 @@ ATOMIC_OPS(andnot, bic)
ATOMIC_OPS(or, bis)
ATOMIC_OPS(xor, xor)

#define atomic_fetch_and_relaxed atomic_fetch_and_relaxed
#define atomic_fetch_andnot_relaxed atomic_fetch_andnot_relaxed
#define atomic_fetch_or_relaxed atomic_fetch_or_relaxed
#define atomic_fetch_xor_relaxed atomic_fetch_xor_relaxed
#define arch_atomic_fetch_and_relaxed arch_atomic_fetch_and_relaxed
#define arch_atomic_fetch_andnot_relaxed arch_atomic_fetch_andnot_relaxed
#define arch_atomic_fetch_or_relaxed arch_atomic_fetch_or_relaxed
#define arch_atomic_fetch_xor_relaxed arch_atomic_fetch_xor_relaxed

#define atomic64_fetch_and_relaxed atomic64_fetch_and_relaxed
#define atomic64_fetch_andnot_relaxed atomic64_fetch_andnot_relaxed
#define atomic64_fetch_or_relaxed atomic64_fetch_or_relaxed
#define atomic64_fetch_xor_relaxed atomic64_fetch_xor_relaxed
#define arch_atomic64_fetch_and_relaxed arch_atomic64_fetch_and_relaxed
#define arch_atomic64_fetch_andnot_relaxed arch_atomic64_fetch_andnot_relaxed
#define arch_atomic64_fetch_or_relaxed arch_atomic64_fetch_or_relaxed
#define arch_atomic64_fetch_xor_relaxed arch_atomic64_fetch_xor_relaxed

#undef ATOMIC_OPS
#undef ATOMIC64_FETCH_OP
@@ -198,22 +200,26 @@ ATOMIC_OPS(xor, xor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#define atomic64_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic64_xchg(v, new) (xchg(&((v)->counter), new))
#define arch_atomic64_cmpxchg(v, old, new) \
(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic64_xchg(v, new) \
(arch_xchg(&((v)->counter), new))

#define atomic_cmpxchg(v, old, new) (cmpxchg(&((v)->counter), old, new))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define arch_atomic_cmpxchg(v, old, new) \
(arch_cmpxchg(&((v)->counter), old, new))
#define arch_atomic_xchg(v, new) \
(arch_xchg(&((v)->counter), new))

/**
* atomic_fetch_add_unless - add unless the number is a given value
* arch_atomic_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
static __inline__ int arch_atomic_fetch_add_unless(atomic_t *v, int a, int u)
{
int c, new, old;
smp_mb();
@@ -234,18 +240,18 @@ static __inline__ int atomic_fetch_add_unless(atomic_t *v, int a, int u)
smp_mb();
return old;
}
#define atomic_fetch_add_unless atomic_fetch_add_unless
#define arch_atomic_fetch_add_unless arch_atomic_fetch_add_unless

/**
* atomic64_fetch_add_unless - add unless the number is a given value
* arch_atomic64_fetch_add_unless - add unless the number is a given value
* @v: pointer of type atomic64_t
* @a: the amount to add to v...
* @u: ...unless v is equal to u.
*
* Atomically adds @a to @v, so long as it was not @u.
* Returns the old value of @v.
*/
static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
static __inline__ s64 arch_atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
{
s64 c, new, old;
smp_mb();
@@ -266,16 +272,16 @@ static __inline__ s64 atomic64_fetch_add_unless(atomic64_t *v, s64 a, s64 u)
smp_mb();
return old;
}
#define atomic64_fetch_add_unless atomic64_fetch_add_unless
#define arch_atomic64_fetch_add_unless arch_atomic64_fetch_add_unless

/*
* atomic64_dec_if_positive - decrement by 1 if old value positive
* arch_atomic64_dec_if_positive - decrement by 1 if old value positive
* @v: pointer of type atomic_t
*
* The function returns the old value of *v minus 1, even if
* the atomic variable, v, was not decremented.
*/
static inline s64 atomic64_dec_if_positive(atomic64_t *v)
static inline s64 arch_atomic64_dec_if_positive(atomic64_t *v)
{
s64 old, tmp;
smp_mb();
@@ -295,6 +301,6 @@ static inline s64 atomic64_dec_if_positive(atomic64_t *v)
smp_mb();
return old - 1;
}
#define atomic64_dec_if_positive atomic64_dec_if_positive
#define arch_atomic64_dec_if_positive arch_atomic64_dec_if_positive

#endif /* _ALPHA_ATOMIC_H */
12 changes: 6 additions & 6 deletions arch/alpha/include/asm/cmpxchg.h
@@ -17,7 +17,7 @@
sizeof(*(ptr))); \
})

#define cmpxchg_local(ptr, o, n) \
#define arch_cmpxchg_local(ptr, o, n) \
({ \
__typeof__(*(ptr)) _o_ = (o); \
__typeof__(*(ptr)) _n_ = (n); \
Expand All @@ -26,7 +26,7 @@
sizeof(*(ptr))); \
})

#define cmpxchg64_local(ptr, o, n) \
#define arch_cmpxchg64_local(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg_local((ptr), (o), (n)); \
@@ -42,7 +42,7 @@
* The leading and the trailing memory barriers guarantee that these
* operations are fully ordered.
*/
#define xchg(ptr, x) \
#define arch_xchg(ptr, x) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _x_ = (x); \
Expand All @@ -53,7 +53,7 @@
__ret; \
})

#define cmpxchg(ptr, o, n) \
#define arch_cmpxchg(ptr, o, n) \
({ \
__typeof__(*(ptr)) __ret; \
__typeof__(*(ptr)) _o_ = (o); \
Expand All @@ -65,10 +65,10 @@
__ret; \
})

#define cmpxchg64(ptr, o, n) \
#define arch_cmpxchg64(ptr, o, n) \
({ \
BUILD_BUG_ON(sizeof(*(ptr)) != 8); \
cmpxchg((ptr), (o), (n)); \
arch_cmpxchg((ptr), (o), (n)); \
})

#undef ____cmpxchg
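
Similarly (a simplified sketch, not part of this diff), the regular
xchg()/cmpxchg() entry points are expected to come from the generic
instrumented layer on top of the arch_xchg()/arch_cmpxchg() definitions
renamed above, roughly:

/*
 * Illustrative only: instrument the access, then defer to the arch
 * implementation. The names and instrumentation hooks mirror the
 * generated wrappers rather than copying them verbatim.
 */
#define xchg(ptr, ...) \
({ \
	typeof(ptr) __ai_ptr = (ptr); \
	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
	arch_xchg(__ai_ptr, __VA_ARGS__); \
})

#define cmpxchg(ptr, ...) \
({ \
	typeof(ptr) __ai_ptr = (ptr); \
	instrument_atomic_write(__ai_ptr, sizeof(*__ai_ptr)); \
	arch_cmpxchg(__ai_ptr, __VA_ARGS__); \
})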
