locking/atomic: m68k: move to ARCH_ATOMIC
We'd like all architectures to convert to ARCH_ATOMIC, as once all
architectures are converted it will be possible to make significant
cleanups to the atomics headers, and this will make it much easier to
generically enable atomic functionality (e.g. debug logic in the
instrumented wrappers).

As a step towards that, this patch migrates m68k to ARCH_ATOMIC. The
arch code provides arch_{atomic,atomic64,xchg,cmpxchg}*(), and common
code wraps these with optional instrumentation to provide the regular
functions.
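
For context (not part of this patch), the common code's wrappers are
thin generated shims of roughly the following shape; this is a
simplified sketch of the layering, not the generated source, and the
exact instrumentation hook is an assumption here:

	/*
	 * Sketch of a generated wrapper: the regular atomic_*() name
	 * instruments the access, then calls the arch_*() helper that
	 * the architecture provides.
	 */
	static __always_inline int
	atomic_add_return(int i, atomic_t *v)
	{
		instrument_atomic_read_write(v, sizeof(*v));
		return arch_atomic_add_return(i, v);
	}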

While atomic_dec_and_test_lt() is not part of the common atomic API, it
is also given an `arch_` prefix for consistency.
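
Its one caller touched by this patch, get_mmu_context() in
arch/m68k/include/asm/mmu_context.h, is switched to the new name
directly, since no generic wrapper is generated for it:

	while (arch_atomic_dec_and_test_lt(&nr_free_contexts)) {
		atomic_inc(&nr_free_contexts);
		steal_context();
	}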

Signed-off-by: Mark Rutland <mark.rutland@arm.com>
Reviewed-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Geert Uytterhoeven <geert@linux-m68k.org>
Acked-by: Greg Ungerer <gerg@linux-m68k.org>
Cc: Boqun Feng <boqun.feng@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Will Deacon <will@kernel.org>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Link: https://lore.kernel.org/r/20210525140232.53872-21-mark.rutland@arm.com
Mark Rutland authored and Peter Zijlstra committed May 26, 2021
1 parent f84f1b9 commit e86e793
Showing 4 changed files with 37 additions and 36 deletions.
1 change: 1 addition & 0 deletions arch/m68k/Kconfig
@@ -3,6 +3,7 @@ config M68K
bool
default y
select ARCH_32BIT_OFF_T
select ARCH_ATOMIC
select ARCH_HAS_BINFMT_FLAT
select ARCH_HAS_DMA_PREP_COHERENT if HAS_DMA && MMU && !COLDFIRE
select ARCH_HAS_SYNC_DMA_FOR_DEVICE if HAS_DMA
60 changes: 30 additions & 30 deletions arch/m68k/include/asm/atomic.h
@@ -16,8 +16,8 @@
* We do not have SMP m68k systems, so we don't have to deal with that.
*/

#define atomic_read(v) READ_ONCE((v)->counter)
#define atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))
#define arch_atomic_read(v) READ_ONCE((v)->counter)
#define arch_atomic_set(v, i) WRITE_ONCE(((v)->counter), (i))

/*
* The ColdFire parts cannot do some immediate to memory operations,
@@ -30,15 +30,15 @@
#endif

#define ATOMIC_OP(op, c_op, asm_op) \
static inline void atomic_##op(int i, atomic_t *v) \
static inline void arch_atomic_##op(int i, atomic_t *v) \
{ \
__asm__ __volatile__(#asm_op "l %1,%0" : "+m" (*v) : ASM_DI (i));\
} \

#ifdef CONFIG_RMW_INSNS

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t *v) \
static inline int arch_atomic_##op##_return(int i, atomic_t *v) \
{ \
int t, tmp; \
\
@@ -48,12 +48,12 @@ static inline int atomic_##op##_return(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
: "g" (i), "2" (atomic_read(v))); \
: "g" (i), "2" (arch_atomic_read(v))); \
return t; \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t *v) \
static inline int arch_atomic_fetch_##op(int i, atomic_t *v) \
{ \
int t, tmp; \
\
@@ -63,14 +63,14 @@ static inline int atomic_fetch_##op(int i, atomic_t *v) \
" casl %2,%1,%0\n" \
" jne 1b" \
: "+m" (*v), "=&d" (t), "=&d" (tmp) \
: "g" (i), "2" (atomic_read(v))); \
: "g" (i), "2" (arch_atomic_read(v))); \
return tmp; \
}

#else

#define ATOMIC_OP_RETURN(op, c_op, asm_op) \
static inline int atomic_##op##_return(int i, atomic_t * v) \
static inline int arch_atomic_##op##_return(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
@@ -83,7 +83,7 @@ static inline int atomic_##op##_return(int i, atomic_t * v) \
}

#define ATOMIC_FETCH_OP(op, c_op, asm_op) \
static inline int atomic_fetch_##op(int i, atomic_t * v) \
static inline int arch_atomic_fetch_##op(int i, atomic_t * v) \
{ \
unsigned long flags; \
int t; \
@@ -120,27 +120,27 @@ ATOMIC_OPS(xor, ^=, eor)
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

static inline void atomic_inc(atomic_t *v)
static inline void arch_atomic_inc(atomic_t *v)
{
__asm__ __volatile__("addql #1,%0" : "+m" (*v));
}
#define atomic_inc atomic_inc
#define arch_atomic_inc arch_atomic_inc

static inline void atomic_dec(atomic_t *v)
static inline void arch_atomic_dec(atomic_t *v)
{
__asm__ __volatile__("subql #1,%0" : "+m" (*v));
}
#define atomic_dec atomic_dec
#define arch_atomic_dec arch_atomic_dec

static inline int atomic_dec_and_test(atomic_t *v)
static inline int arch_atomic_dec_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("subql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
#define atomic_dec_and_test atomic_dec_and_test
#define arch_atomic_dec_and_test arch_atomic_dec_and_test

static inline int atomic_dec_and_test_lt(atomic_t *v)
static inline int arch_atomic_dec_and_test_lt(atomic_t *v)
{
char c;
__asm__ __volatile__(
@@ -150,66 +150,66 @@ static inline int atomic_dec_and_test_lt(atomic_t *v)
return c != 0;
}

static inline int atomic_inc_and_test(atomic_t *v)
static inline int arch_atomic_inc_and_test(atomic_t *v)
{
char c;
__asm__ __volatile__("addql #1,%1; seq %0" : "=d" (c), "+m" (*v));
return c != 0;
}
#define atomic_inc_and_test atomic_inc_and_test
#define arch_atomic_inc_and_test arch_atomic_inc_and_test

#ifdef CONFIG_RMW_INSNS

#define atomic_cmpxchg(v, o, n) ((int)cmpxchg(&((v)->counter), (o), (n)))
#define atomic_xchg(v, new) (xchg(&((v)->counter), new))
#define arch_atomic_cmpxchg(v, o, n) ((int)arch_cmpxchg(&((v)->counter), (o), (n)))
#define arch_atomic_xchg(v, new) (arch_xchg(&((v)->counter), new))

#else /* !CONFIG_RMW_INSNS */

static inline int atomic_cmpxchg(atomic_t *v, int old, int new)
static inline int arch_atomic_cmpxchg(atomic_t *v, int old, int new)
{
unsigned long flags;
int prev;

local_irq_save(flags);
prev = atomic_read(v);
prev = arch_atomic_read(v);
if (prev == old)
atomic_set(v, new);
arch_atomic_set(v, new);
local_irq_restore(flags);
return prev;
}

static inline int atomic_xchg(atomic_t *v, int new)
static inline int arch_atomic_xchg(atomic_t *v, int new)
{
unsigned long flags;
int prev;

local_irq_save(flags);
prev = atomic_read(v);
atomic_set(v, new);
prev = arch_atomic_read(v);
arch_atomic_set(v, new);
local_irq_restore(flags);
return prev;
}

#endif /* !CONFIG_RMW_INSNS */

static inline int atomic_sub_and_test(int i, atomic_t *v)
static inline int arch_atomic_sub_and_test(int i, atomic_t *v)
{
char c;
__asm__ __volatile__("subl %2,%1; seq %0"
: "=d" (c), "+m" (*v)
: ASM_DI (i));
return c != 0;
}
#define atomic_sub_and_test atomic_sub_and_test
#define arch_atomic_sub_and_test arch_atomic_sub_and_test

static inline int atomic_add_negative(int i, atomic_t *v)
static inline int arch_atomic_add_negative(int i, atomic_t *v)
{
char c;
__asm__ __volatile__("addl %2,%1; smi %0"
: "=d" (c), "+m" (*v)
: ASM_DI (i));
return c != 0;
}
#define atomic_add_negative atomic_add_negative
#define arch_atomic_add_negative arch_atomic_add_negative

#endif /* __ARCH_M68K_ATOMIC __ */
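
As a reading aid (not part of the patch), hand-expanding
ATOMIC_OP(add, +=, add) from the macros above gives roughly:

	static inline void arch_atomic_add(int i, atomic_t *v)
	{
		__asm__ __volatile__("addl %1,%0" : "+m" (*v) : ASM_DI (i));
	}
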
10 changes: 5 additions & 5 deletions arch/m68k/include/asm/cmpxchg.h
@@ -76,11 +76,11 @@ static inline unsigned long __xchg(unsigned long x, volatile void * ptr, int siz
}
#endif

#define xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})
#define arch_xchg(ptr,x) ({(__typeof__(*(ptr)))__xchg((unsigned long)(x),(ptr),sizeof(*(ptr)));})

#include <asm-generic/cmpxchg-local.h>

#define cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))
#define arch_cmpxchg64_local(ptr, o, n) __generic_cmpxchg64_local((ptr), (o), (n))

extern unsigned long __invalid_cmpxchg_size(volatile void *,
unsigned long, unsigned long, int);
@@ -118,14 +118,14 @@ static inline unsigned long __cmpxchg(volatile void *p, unsigned long old,
return old;
}

#define cmpxchg(ptr, o, n) \
#define arch_cmpxchg(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));})
#define cmpxchg_local(ptr, o, n) \
#define arch_cmpxchg_local(ptr, o, n) \
({(__typeof__(*(ptr)))__cmpxchg((ptr), (unsigned long)(o), \
(unsigned long)(n), sizeof(*(ptr)));})

#define cmpxchg64(ptr, o, n) cmpxchg64_local((ptr), (o), (n))
#define arch_cmpxchg64(ptr, o, n) arch_cmpxchg64_local((ptr), (o), (n))

#else

2 changes: 1 addition & 1 deletion arch/m68k/include/asm/mmu_context.h
@@ -31,7 +31,7 @@ static inline void get_mmu_context(struct mm_struct *mm)

if (mm->context != NO_CONTEXT)
return;
while (atomic_dec_and_test_lt(&nr_free_contexts)) {
while (arch_atomic_dec_and_test_lt(&nr_free_contexts)) {
atomic_inc(&nr_free_contexts);
steal_context();
}
