locking/rtmutex: Guard regular sleeping locks specific functions
Guard the regular sleeping-lock-specific functionality, which is used for
rtmutex on non-RT enabled kernels and for mutex, rtmutex and semaphores
on RT enabled kernels, so that the code can be reused for the RT-specific
implementation of spinlocks and rwlocks in a different compilation unit.

No functional change.

Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Signed-off-by: Peter Zijlstra (Intel) <peterz@infradead.org>
Signed-off-by: Ingo Molnar <mingo@kernel.org>
Link: https://lore.kernel.org/r/20210815211303.311535693@linutronix.de
Thomas Gleixner authored and Ingo Molnar committed Aug 17, 2021
1 parent 456cfbc commit e17ba59
Showing 3 changed files with 133 additions and 123 deletions.
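
The reuse described in the commit message rests on a conditional-compilation pattern: rtmutex.c is included as a .c file from the compilation units that need it, and each unit opts in by defining a build guard before the include. The single-file sketch below is illustrative only and not kernel code; the names demo_trylock and demo_unlock are invented for the example.

/*
 * Single-file sketch of the build-guard pattern (not kernel code).  In the
 * kernel, rtmutex_api.c and rwsem.c define RT_MUTEX_BUILD_MUTEX and then
 * #include "rtmutex.c"; a future spinlock/rwlock unit can include the same
 * file under a different guard without pulling in the sleeping-lock paths.
 */
#include <stdio.h>

#define RT_MUTEX_BUILD_MUTEX    /* the including unit opts in before inclusion */

/* ---- stand-in for the shared rtmutex.c implementation ---- */
static int demo_trylock(int *owner)
{
        if (*owner)
                return 0;       /* already held */
        *owner = 1;
        return 1;
}

#ifdef RT_MUTEX_BUILD_MUTEX
/* Only compiled for units that asked for the sleeping-lock functions. */
static void demo_unlock(int *owner)
{
        *owner = 0;
}
#endif
/* ---- end of the shared part ---- */

int main(void)
{
        int owner = 0;

        printf("trylock: %d\n", demo_trylock(&owner)); /* 1: acquired */
        printf("trylock: %d\n", demo_trylock(&owner)); /* 0: already owned */
        demo_unlock(&owner);
        printf("trylock: %d\n", demo_trylock(&owner)); /* 1: acquired again */
        return 0;
}

Compiled as an ordinary C program, the three trylock calls print 1, 0 and 1, mirroring how the guarded sleeping-lock functions are available only to units that define the guard.
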
254 changes: 131 additions & 123 deletions kernel/locking/rtmutex.c
@@ -1075,10 +1075,139 @@ static void __sched mark_wakeup_next_waiter(struct rt_wake_q_head *wqh,
	raw_spin_unlock(&current->pi_lock);
}

static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
	int ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the lock waiters bit
	 * unconditionally. Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
	unsigned long flags;
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking the @lock->wait_lock as
	 * it is only being read, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex has currently no owner. Lock the wait lock and try to
	 * acquire the lock. We use irqsave here to support early boot calls.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	ret = __rt_mutex_slowtrylock(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}

static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(lock);
}

/*
 * Slow path to release a rt-mutex.
 */
static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;

	/* irqsave required to support early boot calls */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	/*
	 * We must be careful here if the fast path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock);   <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			if (free)
	 *				kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return;
		/* Relock the rtmutex and try again */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 *
	 * Queue the next waiter for wakeup once we release the wait_lock.
	 */
	mark_wakeup_next_waiter(&wqh, lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	rt_mutex_wake_up_q(&wqh);
}

static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
		return;

	rt_mutex_slowunlock(lock);
}

#ifdef RT_MUTEX_BUILD_MUTEX
/*
* Functions required for:
* - rtmutex, futex on all kernels
* - mutex and rwsem substitutions on RT kernels
*/

/*
 * Remove a waiter from a lock and give up
 *
 * Must be called with lock->wait_lock held and interrupts disabled. I must
 * Must be called with lock->wait_lock held and interrupts disabled. It must
 * have just failed to try_to_take_rt_mutex().
 */
static void __sched remove_waiter(struct rt_mutex_base *lock,
@@ -1286,125 +1415,4 @@ static __always_inline int __rt_mutex_lock(struct rt_mutex_base *lock,

	return rt_mutex_slowlock(lock, state);
}

static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
	int ret = try_to_take_rt_mutex(lock, current, NULL);

	/*
	 * try_to_take_rt_mutex() sets the lock waiters bit
	 * unconditionally. Clean this up.
	 */
	fixup_rt_mutex_waiters(lock);

	return ret;
}

/*
 * Slow path try-lock function:
 */
static int __sched rt_mutex_slowtrylock(struct rt_mutex_base *lock)
{
	unsigned long flags;
	int ret;

	/*
	 * If the lock already has an owner we fail to get the lock.
	 * This can be done without taking the @lock->wait_lock as
	 * it is only being read, and this is a trylock anyway.
	 */
	if (rt_mutex_owner(lock))
		return 0;

	/*
	 * The mutex has currently no owner. Lock the wait lock and try to
	 * acquire the lock. We use irqsave here to support early boot calls.
	 */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	ret = __rt_mutex_slowtrylock(lock);

	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	return ret;
}

static __always_inline int __rt_mutex_trylock(struct rt_mutex_base *lock)
{
	if (likely(rt_mutex_cmpxchg_acquire(lock, NULL, current)))
		return 1;

	return rt_mutex_slowtrylock(lock);
}

/*
 * Slow path to release a rt-mutex.
 */
static void __sched rt_mutex_slowunlock(struct rt_mutex_base *lock)
{
	DEFINE_RT_WAKE_Q(wqh);
	unsigned long flags;

	/* irqsave required to support early boot calls */
	raw_spin_lock_irqsave(&lock->wait_lock, flags);

	debug_rt_mutex_unlock(lock);

	/*
	 * We must be careful here if the fast path is enabled. If we
	 * have no waiters queued we cannot set owner to NULL here
	 * because of:
	 *
	 * foo->lock->owner = NULL;
	 *			rtmutex_lock(foo->lock);   <- fast path
	 *			free = atomic_dec_and_test(foo->refcnt);
	 *			rtmutex_unlock(foo->lock); <- fast path
	 *			if (free)
	 *				kfree(foo);
	 * raw_spin_unlock(foo->lock->wait_lock);
	 *
	 * So for the fastpath enabled kernel:
	 *
	 * Nothing can set the waiters bit as long as we hold
	 * lock->wait_lock. So we do the following sequence:
	 *
	 *	owner = rt_mutex_owner(lock);
	 *	clear_rt_mutex_waiters(lock);
	 *	raw_spin_unlock(&lock->wait_lock);
	 *	if (cmpxchg(&lock->owner, owner, 0) == owner)
	 *		return;
	 *	goto retry;
	 *
	 * The fastpath disabled variant is simple as all access to
	 * lock->owner is serialized by lock->wait_lock:
	 *
	 *	lock->owner = NULL;
	 *	raw_spin_unlock(&lock->wait_lock);
	 */
	while (!rt_mutex_has_waiters(lock)) {
		/* Drops lock->wait_lock ! */
		if (unlock_rt_mutex_safe(lock, flags) == true)
			return;
		/* Relock the rtmutex and try again */
		raw_spin_lock_irqsave(&lock->wait_lock, flags);
	}

	/*
	 * The wakeup next waiter path does not suffer from the above
	 * race. See the comments there.
	 *
	 * Queue the next waiter for wakeup once we release the wait_lock.
	 */
	mark_wakeup_next_waiter(&wqh, lock);
	raw_spin_unlock_irqrestore(&lock->wait_lock, flags);

	rt_mutex_wake_up_q(&wqh);
}

static __always_inline void __rt_mutex_unlock(struct rt_mutex_base *lock)
{
	if (likely(rt_mutex_cmpxchg_release(lock, current, NULL)))
		return;

	rt_mutex_slowunlock(lock);
}
#endif /* RT_MUTEX_BUILD_MUTEX */
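
The long comment in rt_mutex_slowunlock() above explains why, with the lockless fast path enabled, the owner field may only be cleared by a cmpxchg issued after wait_lock has been dropped, retrying if a new waiter queued itself in the meantime. The userspace sketch below restates that sequence outside the kernel; it uses C11 atomics and a pthread mutex as a stand-in for wait_lock, and all names (demo_rtmutex, demo_slowunlock) are invented for the example.

/*
 * Userspace sketch of the "check waiters under wait_lock, drop wait_lock,
 * cmpxchg the owner word, retry on failure" unlock sequence -- illustrative
 * only, not kernel code.  Build with:  cc -pthread demo.c
 */
#include <stdatomic.h>
#include <pthread.h>

#define WAITERS_BIT	1UL

struct demo_rtmutex {
	_Atomic unsigned long	owner;		/* owner cookie | WAITERS_BIT */
	pthread_mutex_t		wait_lock;	/* stand-in for lock->wait_lock */
};

static void demo_slowunlock(struct demo_rtmutex *lock, unsigned long me)
{
	for (;;) {
		unsigned long expected = me;

		pthread_mutex_lock(&lock->wait_lock);

		if (atomic_load(&lock->owner) & WAITERS_BIT) {
			/*
			 * Waiters are queued: hand the lock off under
			 * wait_lock (the race-free path; details elided).
			 */
			pthread_mutex_unlock(&lock->wait_lock);
			return;
		}

		/*
		 * No waiters while wait_lock is held.  Waiters can only set
		 * the bit under wait_lock, so drop it first and then try to
		 * clear the owner word atomically.
		 */
		pthread_mutex_unlock(&lock->wait_lock);

		if (atomic_compare_exchange_strong(&lock->owner, &expected, 0UL))
			return;		/* nobody queued in between: done */

		/* A waiter slipped in after wait_lock was dropped: retry. */
	}
}

int main(void)
{
	struct demo_rtmutex lock;

	atomic_store(&lock.owner, 42UL);	/* pretend cookie 42 owns it */
	pthread_mutex_init(&lock.wait_lock, NULL);

	demo_slowunlock(&lock, 42UL);
	return atomic_load(&lock.owner) != 0UL;	/* exit 0 on success */
}

With no contending thread the waiters bit is never set, so the cmpxchg succeeds on the first pass and the program exits 0; a failed cmpxchg models exactly the case the kernel comment warns about.
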
1 change: 1 addition & 0 deletions kernel/locking/rtmutex_api.c
@@ -5,6 +5,7 @@
#include <linux/spinlock.h>
#include <linux/export.h>

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

/*
1 change: 1 addition & 0 deletions kernel/locking/rwsem.c
@@ -1347,6 +1347,7 @@ static inline void __downgrade_write(struct rw_semaphore *sem)

#else /* !CONFIG_PREEMPT_RT */

#define RT_MUTEX_BUILD_MUTEX
#include "rtmutex.c"

#define rwbase_set_and_save_current_state(state) \
