sh: add J2 atomics using the cas.l instruction
Signed-off-by: Rich Felker <dalias@libc.org>
Rich Felker committed on Aug 5, 2016
1 parent (834da19), commit 2b47d54
Showing 9 changed files with 481 additions and 216 deletions.
New file: bitops-cas.h (+93 lines)
#ifndef __ASM_SH_BITOPS_CAS_H
#define __ASM_SH_BITOPS_CAS_H

static inline unsigned __bo_cas(volatile unsigned *p, unsigned old, unsigned new)
{
        __asm__ __volatile__("cas.l %1,%0,@r0"
                : "+r"(new)
                : "r"(old), "z"(p)
                : "t", "memory" );
        return new;
}

static inline void set_bit(int nr, volatile void *addr)
{
        unsigned mask, old;
        volatile unsigned *a = addr;

        a += nr >> 5;
        mask = 1U << (nr & 0x1f);

        do old = *a;
        while (__bo_cas(a, old, old|mask) != old);
}

static inline void clear_bit(int nr, volatile void *addr)
{
        unsigned mask, old;
        volatile unsigned *a = addr;

        a += nr >> 5;
        mask = 1U << (nr & 0x1f);

        do old = *a;
        while (__bo_cas(a, old, old&~mask) != old);
}

static inline void change_bit(int nr, volatile void *addr)
{
        unsigned mask, old;
        volatile unsigned *a = addr;

        a += nr >> 5;
        mask = 1U << (nr & 0x1f);

        do old = *a;
        while (__bo_cas(a, old, old^mask) != old);
}

static inline int test_and_set_bit(int nr, volatile void *addr)
{
        unsigned mask, old;
        volatile unsigned *a = addr;

        a += nr >> 5;
        mask = 1U << (nr & 0x1f);

        do old = *a;
        while (__bo_cas(a, old, old|mask) != old);

        return !!(old & mask);
}

static inline int test_and_clear_bit(int nr, volatile void *addr)
{
        unsigned mask, old;
        volatile unsigned *a = addr;

        a += nr >> 5;
        mask = 1U << (nr & 0x1f);

        do old = *a;
        while (__bo_cas(a, old, old&~mask) != old);

        return !!(old & mask);
}

static inline int test_and_change_bit(int nr, volatile void *addr)
{
        unsigned mask, old;
        volatile unsigned *a = addr;

        a += nr >> 5;
        mask = 1U << (nr & 0x1f);

        do old = *a;
        while (__bo_cas(a, old, old^mask) != old);

        return !!(old & mask);
}

#include <asm-generic/bitops/non-atomic.h>

#endif /* __ASM_SH_BITOPS_CAS_H */
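The bit helpers above all follow the same two steps: locate the 32-bit word that holds bit nr (nr >> 5), build a single-bit mask (1U << (nr & 0x1f)), then retry the CAS with the word OR'd, AND'd or XOR'd against that mask. The test_and_* variants additionally report whether the bit was set beforehand via !!(old & mask). A quick user-space check of the index/mask arithmetic (illustrative only, not part of the patch):

#include <assert.h>
#include <stdio.h>

int main(void)
{
        int nr = 37;                            /* arbitrary example bit */
        unsigned idx  = nr >> 5;                /* word index: 37 / 32 = 1 */
        unsigned mask = 1U << (nr & 0x1f);      /* bit 5 within that word */

        assert(idx == 1 && mask == 0x20);

        unsigned words[2] = { 0, 0 };
        words[idx] |= mask;                     /* what set_bit() does, atomically */
        printf("words[1] = 0x%x\n", words[1]);  /* prints 0x20 */
        return 0;
}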
New file: cmpxchg-cas.h (+24 lines)
#ifndef __ASM_SH_CMPXCHG_CAS_H
#define __ASM_SH_CMPXCHG_CAS_H

static inline unsigned long
__cmpxchg_u32(volatile u32 *m, unsigned long old, unsigned long new)
{
        __asm__ __volatile__("cas.l %1,%0,@r0"
                : "+r"(new)
                : "r"(old), "z"(m)
                : "t", "memory" );
        return new;
}

static inline unsigned long xchg_u32(volatile u32 *m, unsigned long val)
{
        unsigned long old;
        do old = *m;
        while (__cmpxchg_u32(m, old, val) != old);
        return old;
}

#include <asm/cmpxchg-xchg.h>

#endif /* __ASM_SH_CMPXCHG_CAS_H */
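cmpxchg-cas.h only needs to supply the 32-bit compare-and-exchange; the unconditional xchg_u32() is derived from it by retrying the CAS until it lands, and the included <asm/cmpxchg-xchg.h> builds the narrower xchg sizes on top of the 32-bit primitive. The same derivation can be sketched in portable user-space C, with GCC's __atomic_compare_exchange_n standing in for cas.l (an illustration, not the kernel code):

#include <stdint.h>
#include <stdio.h>

/* CAS stand-in with the same contract as __cmpxchg_u32(): returns the value
 * found in memory; the store took effect iff that value equals 'old'. */
static uint32_t cmpxchg32(volatile uint32_t *m, uint32_t old, uint32_t val)
{
        uint32_t expected = old;
        __atomic_compare_exchange_n(m, &expected, val, 0,
                                    __ATOMIC_SEQ_CST, __ATOMIC_SEQ_CST);
        return expected;
}

/* Unconditional exchange built from CAS, mirroring xchg_u32(). */
static uint32_t xchg32(volatile uint32_t *m, uint32_t val)
{
        uint32_t old;
        do old = *m;
        while (cmpxchg32(m, old, val) != old);
        return old;
}

int main(void)
{
        volatile uint32_t word = 7;
        uint32_t prev = xchg32(&word, 42);
        printf("prev=%u now=%u\n", prev, (unsigned)word);   /* prev=7 now=42 */
        return 0;
}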
New file: spinlock-cas.h (+117 lines)
/*
 * include/asm-sh/spinlock-cas.h
 *
 * Copyright (C) 2015 SEI
 *
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 */
#ifndef __ASM_SH_SPINLOCK_CAS_H
#define __ASM_SH_SPINLOCK_CAS_H

#include <asm/barrier.h>
#include <asm/processor.h>

static inline unsigned __sl_cas(volatile unsigned *p, unsigned old, unsigned new)
{
        __asm__ __volatile__("cas.l %1,%0,@r0"
                : "+r"(new)
                : "r"(old), "z"(p)
                : "t", "memory" );
        return new;
}

/*
 * Your basic SMP spinlocks, allowing only a single CPU anywhere
 */

#define arch_spin_is_locked(x)          ((x)->lock <= 0)
#define arch_spin_lock_flags(lock, flags) arch_spin_lock(lock)

static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
{
        smp_cond_load_acquire(&lock->lock, VAL > 0);
}

static inline void arch_spin_lock(arch_spinlock_t *lock)
{
        while (!__sl_cas(&lock->lock, 1, 0));
}

static inline void arch_spin_unlock(arch_spinlock_t *lock)
{
        __sl_cas(&lock->lock, 0, 1);
}

static inline int arch_spin_trylock(arch_spinlock_t *lock)
{
        return __sl_cas(&lock->lock, 1, 0);
}

/*
 * Read-write spinlocks, allowing multiple readers but only one writer.
 *
 * NOTE! it is quite common to have readers in interrupts but no interrupt
 * writers. For those circumstances we can "mix" irq-safe locks - any writer
 * needs to get a irq-safe write-lock, but readers can get non-irqsafe
 * read-locks.
 */

/**
 * read_can_lock - would read_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_read_can_lock(x)   ((x)->lock > 0)

/**
 * write_can_lock - would write_trylock() succeed?
 * @lock: the rwlock in question.
 */
#define arch_write_can_lock(x)  ((x)->lock == RW_LOCK_BIAS)

static inline void arch_read_lock(arch_rwlock_t *rw)
{
        unsigned old;
        do old = rw->lock;
        while (!old || __sl_cas(&rw->lock, old, old-1) != old);
}

static inline void arch_read_unlock(arch_rwlock_t *rw)
{
        unsigned old;
        do old = rw->lock;
        while (__sl_cas(&rw->lock, old, old+1) != old);
}

static inline void arch_write_lock(arch_rwlock_t *rw)
{
        while (__sl_cas(&rw->lock, RW_LOCK_BIAS, 0) != RW_LOCK_BIAS);
}

static inline void arch_write_unlock(arch_rwlock_t *rw)
{
        __sl_cas(&rw->lock, 0, RW_LOCK_BIAS);
}

static inline int arch_read_trylock(arch_rwlock_t *rw)
{
        unsigned old;
        do old = rw->lock;
        while (old && __sl_cas(&rw->lock, old, old-1) != old);
        return !!old;
}

static inline int arch_write_trylock(arch_rwlock_t *rw)
{
        return __sl_cas(&rw->lock, RW_LOCK_BIAS, 0) == RW_LOCK_BIAS;
}

#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock)   cpu_relax()
#define arch_read_relax(lock)   cpu_relax()
#define arch_write_relax(lock)  cpu_relax()

#endif /* __ASM_SH_SPINLOCK_CAS_H */
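The spinlock word holds 1 when the lock is free and 0 when it is held: arch_spin_lock() spins on a 1-to-0 CAS and arch_spin_unlock() swaps it back, which is why arch_spin_is_locked() tests lock <= 0. The rwlock is a biased counter: it starts at RW_LOCK_BIAS, each reader takes one unit (and refuses to proceed while the count is 0, i.e. write-held), and a writer can only acquire it by swapping the full bias down to 0, which succeeds only when no readers remain. A rough non-atomic model of that counting scheme (the RW_LOCK_BIAS value here is an assumption for illustration; the kernel defines the real one elsewhere):

#include <assert.h>
#include <stdio.h>

#define RW_LOCK_BIAS 0x01000000         /* assumed value, for illustration only */

/* State transitions only; the real code performs each step as a cas.l retry. */
static unsigned lockval = RW_LOCK_BIAS;

static int try_read_lock(void)          /* reader: take one unit of the bias */
{
        if (lockval == 0)               /* 0 means a writer holds the lock */
                return 0;
        lockval -= 1;
        return 1;
}

static int try_write_lock(void)         /* writer: swing the full bias to 0 */
{
        if (lockval != RW_LOCK_BIAS)    /* readers (or a writer) still present */
                return 0;
        lockval = 0;
        return 1;
}

int main(void)
{
        assert(try_read_lock());        /* one reader in */
        assert(!try_write_lock());      /* writer blocked by that reader */
        lockval += 1;                   /* read_unlock */
        assert(try_write_lock());       /* now the writer gets it */
        printf("lockval = 0x%x\n", lockval);    /* 0x0 while write-held */
        return 0;
}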