locking/qspinlock: Extract out code snippets for the next patch
This is a preparatory patch that extracts the following two code
snippets for use by the next performance optimization patch.

 1) the logic for the exchange of new and previous tail code words
    into a new xchg_tail() function.
 2) the logic for clearing the pending bit and setting the locked bit
    into a new clear_pending_set_locked() function.

This patch also simplifies the trylock operation before queuing by
calling queued_spin_trylock() directly.
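[Editor's note] To make the two new helpers easy to experiment with outside
the kernel, here is a minimal user-space sketch of their semantics. It is an
illustration, not kernel code: C11 <stdatomic.h> stands in for the kernel's
atomic_t API, and the layout constants are simplified local stand-ins for the
qspinlock_types.h definitions (locked byte in bits 0-7, pending byte in bits
8-15, tail code word in bits 16-31).

/*
 * User-space model of the two helpers this patch extracts.
 * ASSUMPTIONS: C11 atomics replace the kernel's atomic_t API, and the
 * constants below are simplified copies of the qspinlock_types.h layout.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

#define _Q_LOCKED_VAL		(1U << 0)	/* locked byte, bits 0-7 */
#define _Q_PENDING_VAL		(1U << 8)	/* pending byte, bits 8-15 */
#define _Q_LOCKED_PENDING_MASK	0x0000ffffU
#define _Q_TAIL_MASK		0xffff0000U	/* tail code word, bits 16-31 */

struct qspinlock { _Atomic uint32_t val; };

/* *,1,0 -> *,0,1: one atomic add, valid only from the *,1,0 state */
static inline void clear_pending_set_locked(struct qspinlock *lock)
{
	atomic_fetch_add(&lock->val, -_Q_PENDING_VAL + _Q_LOCKED_VAL);
}

/* p,*,* -> n,*,*: install a new tail code word, return the previous one */
static inline uint32_t xchg_tail(struct qspinlock *lock, uint32_t tail)
{
	uint32_t old = atomic_load(&lock->val);
	uint32_t new;

	do {
		new = (old & _Q_LOCKED_PENDING_MASK) | tail;
		/* on failure, compare_exchange reloads 'old' for the retry */
	} while (!atomic_compare_exchange_weak(&lock->val, &old, new));

	return old;
}

int main(void)
{
	struct qspinlock lock = { .val = _Q_PENDING_VAL };	/* 0,1,0 */

	clear_pending_set_locked(&lock);			/* -> 0,0,1 */
	printf("after clear_pending_set_locked: 0x%08x\n",
	       (unsigned)atomic_load(&lock.val));

	uint32_t prev = xchg_tail(&lock, 1U << 16);		/* -> n,0,1 */
	printf("previous tail: 0x%08x, lock word now: 0x%08x\n",
	       (unsigned)(prev & _Q_TAIL_MASK),
	       (unsigned)atomic_load(&lock.val));
	return 0;
}

The sketch's xchg_tail() keeps the same cmpxchg retry loop as the kernel
version in the diff below; per the commit message, the point of extracting the
snippets into named functions is to give the next performance optimization
patch a single place to change.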

Signed-off-by: Waiman Long <Waiman.Long@hp.com>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Boris Ostrovsky <boris.ostrovsky@oracle.com>
Cc: Borislav Petkov <bp@alien8.de>
Cc: Daniel J Blueman <daniel@numascale.com>
Cc: David Vrabel <david.vrabel@citrix.com>
Cc: Douglas Hatch <doug.hatch@hp.com>
Cc: H. Peter Anvin <hpa@zytor.com>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@oracle.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Oleg Nesterov <oleg@redhat.com>
Cc: Paolo Bonzini <paolo.bonzini@gmail.com>
Cc: Paul E. McKenney <paulmck@linux.vnet.ibm.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Raghavendra K T <raghavendra.kt@linux.vnet.ibm.com>
Cc: Rik van Riel <riel@redhat.com>
Cc: Scott J Norton <scott.norton@hp.com>
Cc: Thomas Gleixner <tglx@linutronix.de>
Cc: virtualization@lists.linux-foundation.org
Cc: xen-devel@lists.xenproject.org
Link: http://lkml.kernel.org/r/1429901803-29771-5-git-send-email-Waiman.Long@hp.com
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Waiman Long authored and Ingo Molnar committed May 8, 2015
1 parent c1fb159 commit 6403bd7
Showing 2 changed files with 50 additions and 31 deletions.
include/asm-generic/qspinlock_types.h: 2 additions & 0 deletions
@@ -58,6 +58,8 @@ typedef struct qspinlock {
 #define _Q_TAIL_CPU_BITS	(32 - _Q_TAIL_CPU_OFFSET)
 #define _Q_TAIL_CPU_MASK	_Q_SET_MASK(TAIL_CPU)
 
+#define _Q_TAIL_MASK		(_Q_TAIL_IDX_MASK | _Q_TAIL_CPU_MASK)
+
 #define _Q_LOCKED_VAL		(1U << _Q_LOCKED_OFFSET)
 #define _Q_PENDING_VAL		(1U << _Q_PENDING_OFFSET)
kernel/locking/qspinlock.c: 48 additions & 31 deletions
@@ -96,6 +96,42 @@ static inline struct mcs_spinlock *decode_tail(u32 tail)
 
 #define _Q_LOCKED_PENDING_MASK (_Q_LOCKED_MASK | _Q_PENDING_MASK)
 
+/**
+ * clear_pending_set_locked - take ownership and clear the pending bit.
+ * @lock: Pointer to queued spinlock structure
+ *
+ * *,1,0 -> *,0,1
+ */
+static __always_inline void clear_pending_set_locked(struct qspinlock *lock)
+{
+	atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL, &lock->val);
+}
+
+/**
+ * xchg_tail - Put in the new queue tail code word & retrieve previous one
+ * @lock : Pointer to queued spinlock structure
+ * @tail : The new queue tail code word
+ * Return: The previous queue tail code word
+ *
+ * xchg(lock, tail)
+ *
+ * p,*,* -> n,*,* ; prev = xchg(lock, node)
+ */
+static __always_inline u32 xchg_tail(struct qspinlock *lock, u32 tail)
+{
+	u32 old, new, val = atomic_read(&lock->val);
+
+	for (;;) {
+		new = (val & _Q_LOCKED_PENDING_MASK) | tail;
+		old = atomic_cmpxchg(&lock->val, val, new);
+		if (old == val)
+			break;
+
+		val = old;
+	}
+	return old;
+}
+
 /**
  * queued_spin_lock_slowpath - acquire the queued spinlock
  * @lock: Pointer to queued spinlock structure
@@ -178,15 +214,7 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	 *
 	 * *,1,0 -> *,0,1
 	 */
-	for (;;) {
-		new = (val & ~_Q_PENDING_MASK) | _Q_LOCKED_VAL;
-
-		old = atomic_cmpxchg(&lock->val, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	clear_pending_set_locked(lock);
 	return;
 
 	/*
@@ -203,37 +231,26 @@ void queued_spin_lock_slowpath(struct qspinlock *lock, u32 val)
 	node->next = NULL;
 
 	/*
-	 * We have already touched the queueing cacheline; don't bother with
-	 * pending stuff.
-	 *
-	 * trylock || xchg(lock, node)
-	 *
-	 * 0,0,0 -> 0,0,1 ; no tail, not locked -> no tail, locked.
-	 * p,y,x -> n,y,x ; tail was p -> tail is n; preserving locked.
+	 * We touched a (possibly) cold cacheline in the per-cpu queue node;
+	 * attempt the trylock once more in the hope someone let go while we
+	 * weren't watching.
 	 */
-	for (;;) {
-		new = _Q_LOCKED_VAL;
-		if (val)
-			new = tail | (val & _Q_LOCKED_PENDING_MASK);
-
-		old = atomic_cmpxchg(&lock->val, val, new);
-		if (old == val)
-			break;
-
-		val = old;
-	}
+	if (queued_spin_trylock(lock))
+		goto release;
 
 	/*
-	 * we won the trylock; forget about queueing.
+	 * We have already touched the queueing cacheline; don't bother with
+	 * pending stuff.
+	 *
+	 * p,*,* -> n,*,*
 	 */
-	if (new == _Q_LOCKED_VAL)
-		goto release;
+	old = xchg_tail(lock, tail);
 
 	/*
	 * if there was a previous node; link it and wait until reaching the
	 * head of the waitqueue.
	 */
-	if (old & ~_Q_LOCKED_PENDING_MASK) {
+	if (old & _Q_TAIL_MASK) {
 		prev = decode_tail(old);
 		WRITE_ONCE(prev->next, node);
 
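[Editor's note] A detail worth spelling out about the first hunk (my reading
of the diff, not text from the commit message): the removed open-coded
pending-to-locked transition was a cmpxchg retry loop, while the new
clear_pending_set_locked() helper is a single unconditional
atomic_add(-_Q_PENDING_VAL + _Q_LOCKED_VAL). That is safe because of the
precondition its own comment states, "*,1,0 -> *,0,1": the caller owns the
pending bit and the locked byte has already drained to zero, so the low bits
are exactly _Q_PENDING_VAL and adding -255 can never borrow into the tail
bits. A stand-alone check, using the same simplified layout as the sketch
after the commit message above:

/*
 * Check that the single add in clear_pending_set_locked() preserves the
 * tail bits, given the "*,1,0" precondition (pending set, locked byte
 * clear). ASSUMPTION: simplified layout with the tail in bits 16-31.
 */
#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define _Q_LOCKED_VAL	(1U << 0)
#define _Q_PENDING_VAL	(1U << 8)
#define _Q_TAIL_MASK	0xffff0000U

int main(void)
{
	uint32_t tail;

	/* exhaustively try every tail code word */
	for (tail = 0; tail <= 0xffff; tail++) {
		uint32_t val = (tail << 16) | _Q_PENDING_VAL;	/* *,1,0 */
		uint32_t res = val + (-_Q_PENDING_VAL + _Q_LOCKED_VAL);

		assert((res & _Q_TAIL_MASK) == (val & _Q_TAIL_MASK));
		assert(res == ((tail << 16) | _Q_LOCKED_VAL));	/* *,0,1 */
	}
	printf("tail bits preserved for all 65536 tail values\n");
	return 0;
}

Run from any other state, the add would corrupt the neighbouring fields,
which is why the helper is only invoked once the pending owner has observed
the locked byte go to zero.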