Skip to content

Commit

Permalink
locking: Convert raw_rwlock functions to arch_rwlock
Browse files Browse the repository at this point in the history
Namespace cleanup for the rwlock functions. No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Acked-by: Peter Zijlstra <peterz@infradead.org>
Acked-by: David S. Miller <davem@davemloft.net>
Acked-by: Ingo Molnar <mingo@elte.hu>
Cc: linux-arch@vger.kernel.org
  • Loading branch information
Thomas Gleixner committed Dec 14, 2009
1 parent fb3a6bb commit e593194
Show file tree
Hide file tree
Showing 18 changed files with 216 additions and 216 deletions.
20 changes: 10 additions & 10 deletions arch/alpha/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -50,17 +50,17 @@ static inline int arch_spin_trylock(arch_spinlock_t *lock)

/***********************************************************/

static inline int __raw_read_can_lock(arch_rwlock_t *lock)
static inline int arch_read_can_lock(arch_rwlock_t *lock)
{
return (lock->lock & 1) == 0;
}

static inline int __raw_write_can_lock(arch_rwlock_t *lock)
static inline int arch_write_can_lock(arch_rwlock_t *lock)
{
return lock->lock == 0;
}

static inline void __raw_read_lock(arch_rwlock_t *lock)
static inline void arch_read_lock(arch_rwlock_t *lock)
{
long regx;

Expand All @@ -80,7 +80,7 @@ static inline void __raw_read_lock(arch_rwlock_t *lock)
: "m" (*lock) : "memory");
}

static inline void __raw_write_lock(arch_rwlock_t *lock)
static inline void arch_write_lock(arch_rwlock_t *lock)
{
long regx;

Expand All @@ -100,7 +100,7 @@ static inline void __raw_write_lock(arch_rwlock_t *lock)
: "m" (*lock) : "memory");
}

static inline int __raw_read_trylock(arch_rwlock_t * lock)
static inline int arch_read_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
Expand All @@ -122,7 +122,7 @@ static inline int __raw_read_trylock(arch_rwlock_t * lock)
return success;
}

static inline int __raw_write_trylock(arch_rwlock_t * lock)
static inline int arch_write_trylock(arch_rwlock_t * lock)
{
long regx;
int success;
Expand All @@ -144,7 +144,7 @@ static inline int __raw_write_trylock(arch_rwlock_t * lock)
return success;
}

static inline void __raw_read_unlock(arch_rwlock_t * lock)
static inline void arch_read_unlock(arch_rwlock_t * lock)
{
long regx;
__asm__ __volatile__(
Expand All @@ -160,14 +160,14 @@ static inline void __raw_read_unlock(arch_rwlock_t * lock)
: "m" (*lock) : "memory");
}

static inline void __raw_write_unlock(arch_rwlock_t * lock)
static inline void arch_write_unlock(arch_rwlock_t * lock)
{
mb();
lock->lock = 0;
}

#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Expand Down
20 changes: 10 additions & 10 deletions arch/arm/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -86,7 +86,7 @@ static inline void arch_spin_unlock(arch_spinlock_t *lock)
* just write zero since the lock is exclusively held.
*/

static inline void __raw_write_lock(arch_rwlock_t *rw)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
unsigned long tmp;

Expand All @@ -106,7 +106,7 @@ static inline void __raw_write_lock(arch_rwlock_t *rw)
smp_mb();
}

static inline int __raw_write_trylock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
unsigned long tmp;

Expand All @@ -126,7 +126,7 @@ static inline int __raw_write_trylock(arch_rwlock_t *rw)
}
}

static inline void __raw_write_unlock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
smp_mb();

Expand All @@ -142,7 +142,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
}

/* write_can_lock - would write_trylock() succeed? */
#define __raw_write_can_lock(x) ((x)->lock == 0)
#define arch_write_can_lock(x) ((x)->lock == 0)

/*
* Read locks are a bit more hairy:
Expand All @@ -156,7 +156,7 @@ static inline void __raw_write_unlock(arch_rwlock_t *rw)
* currently active. However, we know we won't have any write
* locks.
*/
static inline void __raw_read_lock(arch_rwlock_t *rw)
static inline void arch_read_lock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;

Expand All @@ -176,7 +176,7 @@ static inline void __raw_read_lock(arch_rwlock_t *rw)
smp_mb();
}

static inline void __raw_read_unlock(arch_rwlock_t *rw)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2;

Expand All @@ -198,7 +198,7 @@ static inline void __raw_read_unlock(arch_rwlock_t *rw)
: "cc");
}

static inline int __raw_read_trylock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
unsigned long tmp, tmp2 = 1;

Expand All @@ -215,10 +215,10 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
}

/* read_can_lock - would read_trylock() succeed? */
#define __raw_read_can_lock(x) ((x)->lock < 0x80000000)
#define arch_read_can_lock(x) ((x)->lock < 0x80000000)

#define __raw_read_lock_flags(lock, flags) __raw_read_lock(lock)
#define __raw_write_lock_flags(lock, flags) __raw_write_lock(lock)
#define arch_read_lock_flags(lock, flags) arch_read_lock(lock)
#define arch_write_lock_flags(lock, flags) arch_write_lock(lock)

#define arch_spin_relax(lock) cpu_relax()
#define arch_read_relax(lock) cpu_relax()
Expand Down
40 changes: 20 additions & 20 deletions arch/blackfin/include/asm/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -17,12 +17,12 @@ asmlinkage int __raw_spin_is_locked_asm(volatile int *ptr);
asmlinkage void __raw_spin_lock_asm(volatile int *ptr);
asmlinkage int __raw_spin_trylock_asm(volatile int *ptr);
asmlinkage void __raw_spin_unlock_asm(volatile int *ptr);
asmlinkage void __raw_read_lock_asm(volatile int *ptr);
asmlinkage int __raw_read_trylock_asm(volatile int *ptr);
asmlinkage void __raw_read_unlock_asm(volatile int *ptr);
asmlinkage void __raw_write_lock_asm(volatile int *ptr);
asmlinkage int __raw_write_trylock_asm(volatile int *ptr);
asmlinkage void __raw_write_unlock_asm(volatile int *ptr);
asmlinkage void arch_read_lock_asm(volatile int *ptr);
asmlinkage int arch_read_trylock_asm(volatile int *ptr);
asmlinkage void arch_read_unlock_asm(volatile int *ptr);
asmlinkage void arch_write_lock_asm(volatile int *ptr);
asmlinkage int arch_write_trylock_asm(volatile int *ptr);
asmlinkage void arch_write_unlock_asm(volatile int *ptr);

static inline int arch_spin_is_locked(arch_spinlock_t *lock)
{
Expand Down Expand Up @@ -52,44 +52,44 @@ static inline void arch_spin_unlock_wait(arch_spinlock_t *lock)
cpu_relax();
}

static inline int __raw_read_can_lock(arch_rwlock_t *rw)
static inline int arch_read_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) > 0;
}

static inline int __raw_write_can_lock(arch_rwlock_t *rw)
static inline int arch_write_can_lock(arch_rwlock_t *rw)
{
return __raw_uncached_fetch_asm(&rw->lock) == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(arch_rwlock_t *rw)
static inline void arch_read_lock(arch_rwlock_t *rw)
{
__raw_read_lock_asm(&rw->lock);
arch_read_lock_asm(&rw->lock);
}

static inline int __raw_read_trylock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
return __raw_read_trylock_asm(&rw->lock);
return arch_read_trylock_asm(&rw->lock);
}

static inline void __raw_read_unlock(arch_rwlock_t *rw)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
__raw_read_unlock_asm(&rw->lock);
arch_read_unlock_asm(&rw->lock);
}

static inline void __raw_write_lock(arch_rwlock_t *rw)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
__raw_write_lock_asm(&rw->lock);
arch_write_lock_asm(&rw->lock);
}

static inline int __raw_write_trylock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
return __raw_write_trylock_asm(&rw->lock);
return arch_write_trylock_asm(&rw->lock);
}

static inline void __raw_write_unlock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
__raw_write_unlock_asm(&rw->lock);
arch_write_unlock_asm(&rw->lock);
}

#define arch_spin_relax(lock) cpu_relax()
Expand Down
16 changes: 8 additions & 8 deletions arch/cris/include/arch-v32/arch/spinlock.h
Original file line number Diff line number Diff line change
Expand Up @@ -56,48 +56,48 @@ arch_spin_lock_flags(arch_spinlock_t *lock, unsigned long flags)
*
*/

static inline int __raw_read_can_lock(arch_rwlock_t *x)
static inline int arch_read_can_lock(arch_rwlock_t *x)
{
return (int)(x)->lock > 0;
}

static inline int __raw_write_can_lock(arch_rwlock_t *x)
static inline int arch_write_can_lock(arch_rwlock_t *x)
{
return (x)->lock == RW_LOCK_BIAS;
}

static inline void __raw_read_lock(arch_rwlock_t *rw)
static inline void arch_read_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock == 0);
rw->lock--;
arch_spin_unlock(&rw->slock);
}

static inline void __raw_write_lock(arch_rwlock_t *rw)
static inline void arch_write_lock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = 0;
arch_spin_unlock(&rw->slock);
}

static inline void __raw_read_unlock(arch_rwlock_t *rw)
static inline void arch_read_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
rw->lock++;
arch_spin_unlock(&rw->slock);
}

static inline void __raw_write_unlock(arch_rwlock_t *rw)
static inline void arch_write_unlock(arch_rwlock_t *rw)
{
arch_spin_lock(&rw->slock);
while (rw->lock != RW_LOCK_BIAS);
rw->lock = RW_LOCK_BIAS;
arch_spin_unlock(&rw->slock);
}

static inline int __raw_read_trylock(arch_rwlock_t *rw)
static inline int arch_read_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
Expand All @@ -109,7 +109,7 @@ static inline int __raw_read_trylock(arch_rwlock_t *rw)
return ret;
}

static inline int __raw_write_trylock(arch_rwlock_t *rw)
static inline int arch_write_trylock(arch_rwlock_t *rw)
{
int ret = 0;
arch_spin_lock(&rw->slock);
Expand Down
Loading

0 comments on commit e593194

Please sign in to comment.