parisc: Rework arch_rw locking functions
Clean up the arch read/write locking functions based on the arc
implementation. This improves the readability of those functions.

Signed-off-by: Helge Deller <deller@gmx.de>
Helge Deller committed Apr 5, 2020
1 parent 2772f0e commit fbdc8f0
Showing 2 changed files with 67 additions and 82 deletions.
135 changes: 56 additions & 79 deletions arch/parisc/include/asm/spinlock.h
@@ -67,116 +67,93 @@ static inline int arch_spin_trylock(arch_spinlock_t *x)
 
 /*
  * Read-write spinlocks, allowing multiple readers but only one writer.
- * Linux rwlocks are unfair to writers; they can be starved for an indefinite
- * time by readers. With care, they can also be taken in interrupt context.
+ * Unfair locking as Writers could be starved indefinitely by Reader(s)
  *
- * In the PA-RISC implementation, we have a spinlock and a counter.
- * Readers use the lock to serialise their access to the counter (which
- * records how many readers currently hold the lock).
- * Writers hold the spinlock, preventing any readers or other writers from
- * grabbing the rwlock.
+ * The spinlock itself is contained in @counter and access to it is
+ * serialized with @lock_mutex.
  */
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_lock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_read_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
+
 	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter++;
-	arch_spin_unlock(&rw->lock);
-	local_irq_restore(flags);
-}
+	arch_spin_lock(&(rw->lock_mutex));
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ void arch_read_unlock(arch_rwlock_t *rw)
-{
-	unsigned long flags;
-	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
-	rw->counter--;
-	arch_spin_unlock(&rw->lock);
+	/*
+	 * zero means writer holds the lock exclusively, deny Reader.
+	 * Otherwise grant lock to first/subseq reader
+	 */
+	if (rw->counter > 0) {
+		rw->counter--;
+		ret = 1;
+	}
+
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
+
+	return ret;
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to grab the same read lock */
-static __inline__ int arch_read_trylock(arch_rwlock_t *rw)
+/* 1 - lock taken successfully */
+static inline int arch_write_trylock(arch_rwlock_t *rw)
 {
+	int ret = 0;
 	unsigned long flags;
- retry:
+
 	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		rw->counter++;
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-		return 1;
+	arch_spin_lock(&(rw->lock_mutex));
+
+	/*
+	 * If reader(s) hold lock (lock < __ARCH_RW_LOCK_UNLOCKED__),
+	 * deny writer. Otherwise if unlocked grant to writer
+	 * Hence the claim that Linux rwlocks are unfair to writers.
+	 * (can be starved for an indefinite time by readers).
+	 */
+	if (rw->counter == __ARCH_RW_LOCK_UNLOCKED__) {
+		rw->counter = 0;
+		ret = 1;
 	}
 
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
-	/* If write-locked, we fail to acquire the lock */
-	if (rw->counter < 0)
-		return 0;
 
-	/* Wait until we have a realistic chance at the lock */
-	while (arch_spin_is_locked(&rw->lock) && rw->counter >= 0)
-		cpu_relax();
+	return ret;
+}
 
-	goto retry;
+static inline void arch_read_lock(arch_rwlock_t *rw)
+{
+	while (!arch_read_trylock(rw))
+		cpu_relax();
 }
 
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ void arch_write_lock(arch_rwlock_t *rw)
+static inline void arch_write_lock(arch_rwlock_t *rw)
+{
+	while (!arch_write_trylock(rw))
+		cpu_relax();
+}
+
+static inline void arch_read_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
-retry:
-	local_irq_save(flags);
-	arch_spin_lock_flags(&rw->lock, flags);
 
-	if (rw->counter != 0) {
-		arch_spin_unlock(&rw->lock);
-		local_irq_restore(flags);
-
-		while (rw->counter != 0)
-			cpu_relax();
-
-		goto retry;
-	}
-
-	rw->counter = -1; /* mark as write-locked */
-	mb();
+	local_irq_save(flags);
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter++;
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
 }
 
-static __inline__ void arch_write_unlock(arch_rwlock_t *rw)
-{
-	rw->counter = 0;
-	arch_spin_unlock(&rw->lock);
-}
-
-/* Note that we have to ensure interrupts are disabled in case we're
- * interrupted by some other code that wants to read_trylock() this lock */
-static __inline__ int arch_write_trylock(arch_rwlock_t *rw)
+static inline void arch_write_unlock(arch_rwlock_t *rw)
 {
 	unsigned long flags;
-	int result = 0;
 
 	local_irq_save(flags);
-	if (arch_spin_trylock(&rw->lock)) {
-		if (rw->counter == 0) {
-			rw->counter = -1;
-			result = 1;
-		} else {
-			/* Read-locked. Oh well. */
-			arch_spin_unlock(&rw->lock);
-		}
-	}
+	arch_spin_lock(&(rw->lock_mutex));
+	rw->counter = __ARCH_RW_LOCK_UNLOCKED__;
+	arch_spin_unlock(&(rw->lock_mutex));
 	local_irq_restore(flags);
-
-	return result;
 }
 
 #endif /* __ASM_SPINLOCK_H */
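
To make the reworked scheme easier to follow, here is a minimal user-space sketch of the same counter encoding. This is not the kernel code: the model_* names and MODEL_RW_UNLOCKED are made up for illustration, a pthread mutex stands in for lock_mutex, and the local_irq_save()/local_irq_restore() pairs are omitted since they only matter inside the kernel.

#include <pthread.h>
#include <stdio.h>

/* Mirrors __ARCH_RW_LOCK_UNLOCKED__: "no readers, no writer". */
#define MODEL_RW_UNLOCKED	0x01000000

typedef struct {
	pthread_mutex_t lock_mutex;	/* stand-in for arch_spinlock_t lock_mutex */
	unsigned int counter;
} model_rwlock_t;

/* Returns 1 if a read lock was taken; counter > 0 means no writer owns it. */
static int model_read_trylock(model_rwlock_t *rw)
{
	int ret = 0;

	pthread_mutex_lock(&rw->lock_mutex);
	if (rw->counter > 0) {		/* 0 would mean a writer holds the lock */
		rw->counter--;		/* each reader consumes one "ticket" */
		ret = 1;
	}
	pthread_mutex_unlock(&rw->lock_mutex);
	return ret;
}

/* Returns 1 only if nobody holds the lock (counter is exactly the unlocked value). */
static int model_write_trylock(model_rwlock_t *rw)
{
	int ret = 0;

	pthread_mutex_lock(&rw->lock_mutex);
	if (rw->counter == MODEL_RW_UNLOCKED) {
		rw->counter = 0;	/* 0 marks exclusive writer ownership */
		ret = 1;
	}
	pthread_mutex_unlock(&rw->lock_mutex);
	return ret;
}

int main(void)
{
	model_rwlock_t rw = { .lock_mutex = PTHREAD_MUTEX_INITIALIZER,
			      .counter = MODEL_RW_UNLOCKED };

	printf("reader 1: %d\n", model_read_trylock(&rw));	/* 1: granted */
	printf("reader 2: %d\n", model_read_trylock(&rw));	/* 1: readers share */
	printf("writer:   %d\n", model_write_trylock(&rw));	/* 0: readers still hold it */
	printf("counter:  0x%08x\n", rw.counter);		/* 0x00FFFFFE: two readers */
	return 0;
}

As in the patch, the lock and unlock paths never spin themselves; a blocking lock is simply a trylock retried in a cpu_relax()-style loop.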
14 changes: 11 additions & 3 deletions arch/parisc/include/asm/spinlock_types.h
@@ -12,11 +12,19 @@ typedef struct {
 #endif
 } arch_spinlock_t;
 
+
+/* counter:
+ * Unlocked     : 0x0100_0000
+ * Read lock(s) : 0x00FF_FFFF to 0x01 (Multiple Readers decrement it)
+ * Write lock   : 0x0, but only if prior value is "unlocked" 0x0100_0000
+ */
 typedef struct {
-	arch_spinlock_t lock;
-	volatile int counter;
+	arch_spinlock_t		lock_mutex;
+	volatile unsigned int	counter;
 } arch_rwlock_t;
 
-#define __ARCH_RW_LOCK_UNLOCKED { __ARCH_SPIN_LOCK_UNLOCKED, 0 }
+#define __ARCH_RW_LOCK_UNLOCKED__	0x01000000
+#define __ARCH_RW_LOCK_UNLOCKED		{ .lock_mutex = __ARCH_SPIN_LOCK_UNLOCKED, \
+					  .counter = __ARCH_RW_LOCK_UNLOCKED__ }
 
 #endif
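
A quick worked example of the encoding documented above: with three readers holding the lock the counter is 0x01000000 - 3 = 0x00FFFFFD, so arch_write_trylock() fails because the value is not exactly __ARCH_RW_LOCK_UNLOCKED__. Once each reader has incremented the counter back in arch_read_unlock(), a writer can set it to 0, and arch_write_unlock() later restores 0x01000000 outright rather than incrementing.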
