locking/rtmutex: Add adaptive spinwait mechanism

Going to sleep when locks are contended can be quite inefficient when the
contention time is short and the lock owner is running on a different CPU.

The MCS mechanism cannot be used because MCS is strictly FIFO ordered, while
for rtmutex based locks the waiter ordering is priority based: a late-arriving
high-priority waiter must be able to move ahead of already queued
lower-priority waiters, which a strict FIFO queue cannot provide.

Provide a simple adaptive spinwait mechanism which currently restricts the
spinning to the top priority waiter.
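
A minimal userspace sketch of the spin-or-sleep decision described above
(illustration only, not part of this commit): the names struct spin_state and
keep_spinning() are hypothetical, and the fields mirror the stop conditions of
the rtmutex_spin_on_owner() helper added in the diff below.

/* adaptive_spin_sketch.c -- illustration only, not kernel code.
 * Build with: cc -std=c11 -o adaptive_spin_sketch adaptive_spin_sketch.c
 */
#include <stdbool.h>
#include <stdio.h>

/* Stand-in snapshot of what a waiter checks on every spin iteration. */
struct spin_state {
        bool owner_on_cpu;         /* is the lock owner currently running on a CPU? */
        bool top_waiter;           /* is this task the highest-priority waiter?     */
        bool need_resched;         /* has this task been asked to reschedule?       */
        bool owner_vcpu_preempted; /* is the owner's VCPU preempted by the host?    */
};

/*
 * Keep spinning only while a quick hand-over is still plausible; as soon
 * as any condition fails, the waiter should block instead of burning CPU.
 */
static bool keep_spinning(const struct spin_state *s)
{
        return s->owner_on_cpu && s->top_waiter &&
               !s->need_resched && !s->owner_vcpu_preempted;
}

int main(void)
{
        struct spin_state s = { .owner_on_cpu = true, .top_waiter = true };

        /* Owner is running and we are the top waiter: spinning pays off. */
        printf("spin? %s\n", keep_spinning(&s) ? "yes" : "no, sleep");

        /* Owner has been scheduled out: stop spinning and block instead. */
        s.owner_on_cpu = false;
        printf("spin? %s\n", keep_spinning(&s) ? "yes" : "no, sleep");
        return 0;
}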

[ tglx: Provide a contemporary changelog, extended it to all rtmutex based
  locks and updated it to match the other spin on owner implementations ]

Originally-by: Gregory Haskins <ghaskins@novell.com>
Signed-off-by: Steven Rostedt <rostedt@goodmis.org>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211305.912050691@linutronix.de
Steven Rostedt authored and Ingo Molnar committed Aug 17, 2021
1 parent 48eb3f4 commit 992caf7
Showing 1 changed file with 65 additions and 2 deletions.
67 changes: 65 additions & 2 deletions kernel/locking/rtmutex.c
@@ -8,6 +8,11 @@
  * Copyright (C) 2005-2006 Timesys Corp., Thomas Gleixner <tglx@timesys.com>
  * Copyright (C) 2005 Kihon Technologies Inc., Steven Rostedt
  * Copyright (C) 2006 Esben Nielsen
+ * Adaptive Spinlocks:
+ *  Copyright (C) 2008 Novell, Inc., Gregory Haskins, Sven Dietrich,
+ *                                   and Peter Morreale,
+ * Adaptive Spinlocks simplification:
+ *  Copyright (C) 2008 Red Hat, Inc., Steven Rostedt <srostedt@redhat.com>
  *
  * See Documentation/locking/rt-mutex-design.rst for details.
  */
@@ -1297,6 +1302,52 @@ static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
         rt_mutex_slowunlock(lock);
 }
 
+#ifdef CONFIG_SMP
+static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
+                                  struct rt_mutex_waiter *waiter,
+                                  struct task_struct *owner)
+{
+        bool res = true;
+
+        rcu_read_lock();
+        for (;;) {
+                /* If owner changed, trylock again. */
+                if (owner != rt_mutex_owner(lock))
+                        break;
+                /*
+                 * Ensure that @owner is dereferenced after checking that
+                 * the lock owner still matches @owner. If that fails,
+                 * @owner might point to freed memory. If it still matches,
+                 * the rcu_read_lock() ensures the memory stays valid.
+                 */
+                barrier();
+                /*
+                 * Stop spinning when:
+                 *  - the lock owner has been scheduled out
+                 *  - current is no longer the top waiter
+                 *  - current is requested to reschedule (redundant
+                 *    for CONFIG_PREEMPT_RCU=y)
+                 *  - the VCPU on which owner runs is preempted
+                 */
+                if (!owner->on_cpu || waiter != rt_mutex_top_waiter(lock) ||
+                    need_resched() || vcpu_is_preempted(task_cpu(owner))) {
+                        res = false;
+                        break;
+                }
+                cpu_relax();
+        }
+        rcu_read_unlock();
+        return res;
+}
+#else
+static bool rtmutex_spin_on_owner(struct rt_mutex_base *lock,
+                                  struct rt_mutex_waiter *waiter,
+                                  struct task_struct *owner)
+{
+        return false;
+}
+#endif
+
 #ifdef RT_MUTEX_BUILD_MUTEX
 /*
  * Functions required for:
@@ -1381,6 +1432,7 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
                                            struct rt_mutex_waiter *waiter)
 {
         struct rt_mutex *rtm = container_of(lock, struct rt_mutex, rtmutex);
+        struct task_struct *owner;
         int ret = 0;
 
         for (;;) {
@@ -1403,9 +1455,14 @@ static int __sched rt_mutex_slowlock_block(struct rt_mutex_base *lock,
                         break;
                 }
 
+                if (waiter == rt_mutex_top_waiter(lock))
+                        owner = rt_mutex_owner(lock);
+                else
+                        owner = NULL;
                 raw_spin_unlock_irq(&lock->wait_lock);
 
-                schedule();
+                if (!owner || !rtmutex_spin_on_owner(lock, waiter, owner))
+                        schedule();
 
                 raw_spin_lock_irq(&lock->wait_lock);
                 set_current_state(state);
@@ -1561,6 +1618,7 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,
 static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 {
         struct rt_mutex_waiter waiter;
+        struct task_struct *owner;
 
         lockdep_assert_held(&lock->wait_lock);
 
@@ -1579,9 +1637,14 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
                 if (try_to_take_rt_mutex(lock, current, &waiter))
                         break;
 
+                if (&waiter == rt_mutex_top_waiter(lock))
+                        owner = rt_mutex_owner(lock);
+                else
+                        owner = NULL;
                 raw_spin_unlock_irq(&lock->wait_lock);
 
-                schedule_rtlock();
+                if (!owner || !rtmutex_spin_on_owner(lock, &waiter, owner))
+                        schedule_rtlock();
 
                 raw_spin_lock_irq(&lock->wait_lock);
                 set_current_state(TASK_RTLOCK_WAIT);