csky: Atomic operations
This patch adds atomic, cmpxchg, spinlock files.

Signed-off-by: Guo Ren <ren_guo@c-sky.com>
Cc: Andrea Parri <andrea.parri@amarulasolutions.com>
Cc: Arnd Bergmann <arnd@arndb.de>
Reviewed-by: Peter Zijlstra <peterz@infradead.org>
Guo Ren committed Oct 25, 2018
1 parent e38a527 commit a0ae628
Showing 5 changed files with 665 additions and 0 deletions.
212 changes: 212 additions & 0 deletions arch/csky/include/asm/atomic.h
@@ -0,0 +1,212 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_ATOMIC_H
#define __ASM_CSKY_ATOMIC_H

#include <linux/version.h>
#include <asm/cmpxchg.h>
#include <asm/barrier.h>

#ifdef CONFIG_CPU_HAS_LDSTEX

#define __atomic_add_unless __atomic_add_unless
/*
 * Atomically add @a to @v->counter unless the old value equals @u;
 * return the old value.  ldex.w/stex.w are a load-exclusive/
 * store-exclusive pair: stex.w leaves 0 in its source register when
 * the reservation is lost, and "bez %0, 1b" retries the sequence
 * until the store succeeds.
 */
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	unsigned long tmp, ret;

	smp_mb();

	asm volatile (
	"1:	ldex.w		%0, (%3) \n"
	"	mov		%1, %0   \n"
	"	cmpne		%0, %4   \n"
	"	bf		2f       \n"
	"	add		%0, %2   \n"
	"	stex.w		%0, (%3) \n"
	"	bez		%0, 1b   \n"
	"2:				 \n"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (a), "r"(&v->counter), "r"(u)
		: "memory");

	/* Full ordering is only provided when the add was performed. */
	if (ret != u)
		smp_mb();

	return ret;
}
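
/*
 * Illustrative usage (editor's sketch, not part of this patch):
 * because __atomic_add_unless() returns the old value, the classic
 * "take a reference unless the count already hit zero" idiom is:
 */
static inline int example_ref_get_unless_zero(atomic_t *refs)
{
	/* The increment was performed iff the old value was non-zero. */
	return __atomic_add_unless(refs, 1, 0) != 0;
}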

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp;						\
									\
	asm volatile (							\
	"1:	ldex.w		%0, (%2) \n"				\
	"	" #op "		%0, %1   \n"				\
	"	stex.w		%0, (%2) \n"				\
	"	bez		%0, 1b   \n"				\
		: "=&r" (tmp)						\
		: "r" (i), "r"(&v->counter)				\
		: "memory");						\
}

/*
 * The *_return and fetch_* variants are bracketed by smp_mb() and so
 * provide fully ordered (full-barrier) semantics.
 */
#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp, ret;						\
									\
	smp_mb();							\
	asm volatile (							\
	"1:	ldex.w		%0, (%3) \n"				\
	"	" #op "		%0, %2   \n"				\
	"	mov		%1, %0   \n"				\
	"	stex.w		%0, (%3) \n"				\
	"	bez		%0, 1b   \n"				\
		: "=&r" (tmp), "=&r" (ret)				\
		: "r" (i), "r"(&v->counter)				\
		: "memory");						\
	smp_mb();							\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp, ret;						\
									\
	smp_mb();							\
	asm volatile (							\
	"1:	ldex.w		%0, (%3) \n"				\
	"	mov		%1, %0   \n"				\
	"	" #op "		%0, %2   \n"				\
	"	stex.w		%0, (%3) \n"				\
	"	bez		%0, 1b   \n"				\
		: "=&r" (tmp), "=&r" (ret)				\
		: "r" (i), "r"(&v->counter)				\
		: "memory");						\
	smp_mb();							\
									\
	return ret;							\
}

#else /* CONFIG_CPU_HAS_LDSTEX */

#include <linux/irqflags.h>

/*
 * Without ldex/stex, atomicity is provided by disabling interrupts
 * around a plain load/store sequence; this is only sufficient on
 * uniprocessor configurations.
 */
#define __atomic_add_unless __atomic_add_unless
static inline int __atomic_add_unless(atomic_t *v, int a, int u)
{
	unsigned long tmp, ret, flags;

	raw_local_irq_save(flags);

	asm volatile (
	"	ldw		%0, (%3) \n"
	"	mov		%1, %0   \n"
	"	cmpne		%0, %4   \n"
	"	bf		2f       \n"
	"	add		%0, %2   \n"
	"	stw		%0, (%3) \n"
	"2:				 \n"
		: "=&r" (tmp), "=&r" (ret)
		: "r" (a), "r"(&v->counter), "r"(u)
		: "memory");

	raw_local_irq_restore(flags);

	return ret;
}

#define ATOMIC_OP(op, c_op)						\
static inline void atomic_##op(int i, atomic_t *v)			\
{									\
	unsigned long tmp, flags;					\
									\
	raw_local_irq_save(flags);					\
									\
	asm volatile (							\
	"	ldw		%0, (%2) \n"				\
	"	" #op "		%0, %1   \n"				\
	"	stw		%0, (%2) \n"				\
		: "=&r" (tmp)						\
		: "r" (i), "r"(&v->counter)				\
		: "memory");						\
									\
	raw_local_irq_restore(flags);					\
}

#define ATOMIC_OP_RETURN(op, c_op)					\
static inline int atomic_##op##_return(int i, atomic_t *v)		\
{									\
	unsigned long tmp, ret, flags;					\
									\
	raw_local_irq_save(flags);					\
									\
	asm volatile (							\
	"	ldw		%0, (%3) \n"				\
	"	" #op "		%0, %2   \n"				\
	"	stw		%0, (%3) \n"				\
	"	mov		%1, %0   \n"				\
		: "=&r" (tmp), "=&r" (ret)				\
		: "r" (i), "r"(&v->counter)				\
		: "memory");						\
									\
	raw_local_irq_restore(flags);					\
									\
	return ret;							\
}

#define ATOMIC_FETCH_OP(op, c_op)					\
static inline int atomic_fetch_##op(int i, atomic_t *v)		\
{									\
	unsigned long tmp, ret, flags;					\
									\
	raw_local_irq_save(flags);					\
									\
	asm volatile (							\
	"	ldw		%0, (%3) \n"				\
	"	mov		%1, %0   \n"				\
	"	" #op "		%0, %2   \n"				\
	"	stw		%0, (%3) \n"				\
		: "=&r" (tmp), "=&r" (ret)				\
		: "r" (i), "r"(&v->counter)				\
		: "memory");						\
									\
	raw_local_irq_restore(flags);					\
									\
	return ret;							\
}

#endif /* CONFIG_CPU_HAS_LDSTEX */

#define atomic_add_return atomic_add_return
ATOMIC_OP_RETURN(add, +)
#define atomic_sub_return atomic_sub_return
ATOMIC_OP_RETURN(sub, -)

#define atomic_fetch_add atomic_fetch_add
ATOMIC_FETCH_OP(add, +)
#define atomic_fetch_sub atomic_fetch_sub
ATOMIC_FETCH_OP(sub, -)
#define atomic_fetch_and atomic_fetch_and
ATOMIC_FETCH_OP(and, &)
#define atomic_fetch_or atomic_fetch_or
ATOMIC_FETCH_OP(or, |)
#define atomic_fetch_xor atomic_fetch_xor
ATOMIC_FETCH_OP(xor, ^)

#define atomic_and atomic_and
ATOMIC_OP(and, &)
#define atomic_or atomic_or
ATOMIC_OP(or, |)
#define atomic_xor atomic_xor
ATOMIC_OP(xor, ^)

#undef ATOMIC_FETCH_OP
#undef ATOMIC_OP_RETURN
#undef ATOMIC_OP

#include <asm-generic/atomic.h>

#endif /* __ASM_CSKY_ATOMIC_H */
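
To make the macro machinery above concrete: with CONFIG_CPU_HAS_LDSTEX set, the instantiation ATOMIC_OP_RETURN(add, +) expands to roughly the following function (a hand-expanded sketch for illustration, not code from the patch):

/* Hand expansion of ATOMIC_OP_RETURN(add, +) -- illustrative only. */
static inline int atomic_add_return(int i, atomic_t *v)
{
	unsigned long tmp, ret;

	smp_mb();				/* full barrier before ... */
	asm volatile (
	"1:	ldex.w		%0, (%3) \n"	/* exclusive load of v->counter */
	"	add		%0, %2   \n"	/* tmp += i */
	"	mov		%1, %0   \n"	/* ret = new value */
	"	stex.w		%0, (%3) \n"	/* %0 becomes 0 if the store fails */
	"	bez		%0, 1b   \n"	/* lost reservation: retry */
		: "=&r" (tmp), "=&r" (ret)
		: "r" (i), "r"(&v->counter)
		: "memory");
	smp_mb();				/* ... and after: fully ordered */

	return ret;
}

Because the mov follows the add, atomic_add_return() hands back the new value; the ATOMIC_FETCH_OP variants instead copy the old value into ret before applying the operation, so atomic_fetch_add() and friends return the value the counter held beforehand.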
73 changes: 73 additions & 0 deletions arch/csky/include/asm/cmpxchg.h
@@ -0,0 +1,73 @@
/* SPDX-License-Identifier: GPL-2.0 */

#ifndef __ASM_CSKY_CMPXCHG_H
#define __ASM_CSKY_CMPXCHG_H

#ifdef CONFIG_CPU_HAS_LDSTEX
#include <asm/barrier.h>

extern void __bad_xchg(void);

/*
 * Atomically replace *ptr with new and return the previous value.
 * Only 32-bit objects are supported; any other size resolves to
 * __bad_xchg(), which is never defined and so fails at link time.
 */
#define __xchg(new, ptr, size)					\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(new) __new = (new);				\
	__typeof__(*(ptr)) __ret;				\
	unsigned long tmp;					\
	switch (size) {						\
	case 4:							\
		smp_mb();					\
		asm volatile (					\
		"1:	ldex.w		%0, (%3) \n"		\
		"	mov		%1, %2   \n"		\
		"	stex.w		%1, (%3) \n"		\
		"	bez		%1, 1b   \n"		\
			: "=&r" (__ret), "=&r" (tmp)		\
			: "r" (__new), "r"(__ptr)		\
			:);					\
		smp_mb();					\
		break;						\
	default:						\
		__bad_xchg();					\
	}							\
	__ret;							\
})

#define xchg(ptr, x) (__xchg((x), (ptr), sizeof(*(ptr))))

/*
 * Atomically compare *ptr with old and, only when they are equal,
 * replace it with new; the value *ptr held before the operation is
 * returned either way.  cmpne/bt skip the store on a mismatch.
 */
#define __cmpxchg(ptr, old, new, size)				\
({								\
	__typeof__(ptr) __ptr = (ptr);				\
	__typeof__(new) __new = (new);				\
	__typeof__(new) __tmp;					\
	__typeof__(old) __old = (old);				\
	__typeof__(*(ptr)) __ret;				\
	switch (size) {						\
	case 4:							\
		smp_mb();					\
		asm volatile (					\
		"1:	ldex.w		%0, (%3) \n"		\
		"	cmpne		%0, %4   \n"		\
		"	bt		2f       \n"		\
		"	mov		%1, %2   \n"		\
		"	stex.w		%1, (%3) \n"		\
		"	bez		%1, 1b   \n"		\
		"2:				 \n"		\
			: "=&r" (__ret), "=&r" (__tmp)		\
			: "r" (__new), "r"(__ptr), "r"(__old)	\
			:);					\
		smp_mb();					\
		break;						\
	default:						\
		__bad_xchg();					\
	}							\
	__ret;							\
})

#define cmpxchg(ptr, o, n)					\
	(__cmpxchg((ptr), (o), (n), sizeof(*(ptr))))
#else
#include <asm-generic/cmpxchg.h>
#endif

#endif /* __ASM_CSKY_CMPXCHG_H */
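
The commit message also mentions spinlock files, which are not rendered on this page. As a rough illustration of the kind of primitive these xchg()/cmpxchg() helpers enable, here is a hypothetical test-and-set lock; this is an editor's sketch, not the patch's actual arch/csky/include/asm/spinlock.h:

/* Illustrative only: a minimal test-and-set lock built on cmpxchg(). */
typedef struct {
	unsigned int slock;	/* 0 = unlocked, 1 = held */
} example_spinlock_t;

static inline void example_spin_lock(example_spinlock_t *lock)
{
	/* cmpxchg() returns the old value; we own the lock once it was 0.
	 * The smp_mb() pair inside cmpxchg() orders the critical section. */
	while (cmpxchg(&lock->slock, 0, 1) != 0)
		cpu_relax();	/* spin until the holder lets go */
}

static inline void example_spin_unlock(example_spinlock_t *lock)
{
	smp_mb();		/* order the critical section before release */
	lock->slock = 0;
}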