Merge tag 'locking_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull locking fixes from Borislav Petkov:

 - Prevent the leaking of a debug timer in futex_waitv()

 - A preempt-RT mutex locking fix, adding the proper acquire semantics

* tag 'locking_urgent_for_v6.2_rc2' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  futex: Fix futex_waitv() hrtimer debug object leak on kcalloc error
  rtmutex: Add acquire semantics for rtmutex lock acquisition slow path
Linus Torvalds committed Jan 1, 2023 · 2 parents 8b41948 + 94cd8fa · commit 95d248d
Showing 3 changed files with 56 additions and 16 deletions.
11 changes: 7 additions & 4 deletions kernel/futex/syscalls.c
@@ -286,19 +286,22 @@ SYSCALL_DEFINE5(futex_waitv, struct futex_waitv __user *, waiters,
 	}
 
 	futexv = kcalloc(nr_futexes, sizeof(*futexv), GFP_KERNEL);
-	if (!futexv)
-		return -ENOMEM;
+	if (!futexv) {
+		ret = -ENOMEM;
+		goto destroy_timer;
+	}
 
 	ret = futex_parse_waitv(futexv, waiters, nr_futexes);
 	if (!ret)
 		ret = futex_wait_multiple(futexv, nr_futexes, timeout ? &to : NULL);
 
+	kfree(futexv);
+
+destroy_timer:
 	if (timeout) {
 		hrtimer_cancel(&to.timer);
 		destroy_hrtimer_on_stack(&to.timer);
 	}
-
-	kfree(futexv);
 	return ret;
 }
 
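The fix is the standard goto-cleanup pattern: once the hrtimer on the stack has been armed, every exit path must run the teardown code, so the kcalloc() failure branch now jumps to the destroy_timer label instead of returning early and leaking the timer's debug object. Below is a minimal userspace sketch of the same pattern; the arm_timer()/disarm_timer() helpers are made up and merely stand in for the kernel's hrtimer setup and the hrtimer_cancel()/destroy_hrtimer_on_stack() teardown.

#include <stdio.h>
#include <stdlib.h>

struct timer { int armed; };

/* Stand-ins for the kernel's hrtimer setup and teardown calls. */
static void arm_timer(struct timer *t)    { t->armed = 1; }
static void disarm_timer(struct timer *t) { t->armed = 0; }

static int waitv_like(unsigned int nr, int use_timeout)
{
	struct timer to = { 0 };
	int *slots;
	int ret = 0;

	if (use_timeout)
		arm_timer(&to);

	slots = calloc(nr, sizeof(*slots));
	if (!slots) {
		/* The pre-fix code returned here directly, skipping the
		 * teardown and leaking the armed timer. */
		ret = -1;
		goto destroy_timer;
	}

	/* ... the actual wait would happen here ... */

	free(slots);

destroy_timer:
	if (use_timeout)
		disarm_timer(&to);
	return ret;
}

int main(void)
{
	return waitv_like(8, 1) ? EXIT_FAILURE : EXIT_SUCCESS;
}
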
55 changes: 46 additions & 9 deletions kernel/locking/rtmutex.c
@@ -89,15 +89,31 @@ static inline int __ww_mutex_check_kill(struct rt_mutex *lock,
  * set this bit before looking at the lock.
  */
 
-static __always_inline void
-rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+static __always_inline struct task_struct *
+rt_mutex_owner_encode(struct rt_mutex_base *lock, struct task_struct *owner)
 {
 	unsigned long val = (unsigned long)owner;
 
 	if (rt_mutex_has_waiters(lock))
 		val |= RT_MUTEX_HAS_WAITERS;
 
-	WRITE_ONCE(lock->owner, (struct task_struct *)val);
+	return (struct task_struct *)val;
+}
+
+static __always_inline void
+rt_mutex_set_owner(struct rt_mutex_base *lock, struct task_struct *owner)
+{
+	/*
+	 * lock->wait_lock is held but explicit acquire semantics are needed
+	 * for a new lock owner so WRITE_ONCE is insufficient.
+	 */
+	xchg_acquire(&lock->owner, rt_mutex_owner_encode(lock, owner));
+}
+
+static __always_inline void rt_mutex_clear_owner(struct rt_mutex_base *lock)
+{
+	/* lock->wait_lock is held so the unlock provides release semantics. */
+	WRITE_ONCE(lock->owner, rt_mutex_owner_encode(lock, NULL));
 }
 
 static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
@@ -106,7 +122,8 @@ static __always_inline void clear_rt_mutex_waiters(struct rt_mutex_base *lock)
 			((unsigned long)lock->owner & ~RT_MUTEX_HAS_WAITERS);
 }
 
-static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
+static __always_inline void
+fixup_rt_mutex_waiters(struct rt_mutex_base *lock, bool acquire_lock)
 {
 	unsigned long owner, *p = (unsigned long *) &lock->owner;
 
@@ -172,8 +189,21 @@ static __always_inline void fixup_rt_mutex_waiters(struct rt_mutex_base *lock)
 	 * still set.
 	 */
 	owner = READ_ONCE(*p);
-	if (owner & RT_MUTEX_HAS_WAITERS)
-		WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	if (owner & RT_MUTEX_HAS_WAITERS) {
+		/*
+		 * See rt_mutex_set_owner() and rt_mutex_clear_owner() on
+		 * why xchg_acquire() is used for updating owner for
+		 * locking and WRITE_ONCE() for unlocking.
+		 *
+		 * WRITE_ONCE() would work for the acquire case too, but
+		 * in case that the lock acquisition failed it might
+		 * force other lockers into the slow path unnecessarily.
+		 */
+		if (acquire_lock)
+			xchg_acquire(p, owner & ~RT_MUTEX_HAS_WAITERS);
+		else
+			WRITE_ONCE(*p, owner & ~RT_MUTEX_HAS_WAITERS);
+	}
 }
 
 /*
@@ -208,6 +238,13 @@ static __always_inline void mark_rt_mutex_waiters(struct rt_mutex_base *lock)
 		owner = *p;
 	} while (cmpxchg_relaxed(p, owner,
 				 owner | RT_MUTEX_HAS_WAITERS) != owner);
+
+	/*
+	 * The cmpxchg loop above is relaxed to avoid back-to-back ACQUIRE
+	 * operations in the event of contention. Ensure the successful
+	 * cmpxchg is visible.
+	 */
+	smp_mb__after_atomic();
 }
 
 /*
@@ -1243,7 +1280,7 @@ static int __sched __rt_mutex_slowtrylock(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the lock waiters bit
 	 * unconditionally. Clean this up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	return ret;
 }
@@ -1604,7 +1641,7 @@ static int __sched __rt_mutex_slowlock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit
 	 * unconditionally. We might have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 
 	trace_contention_end(lock, ret);
 
@@ -1719,7 +1756,7 @@ static void __sched rtlock_slowlock_locked(struct rt_mutex_base *lock)
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally.
 	 * We might have to fix that up:
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	debug_rt_mutex_free_waiter(&waiter);
 
 	trace_contention_end(lock, 0);
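On the ordering argument spelled out in the new comments: taking the lock must be an ACQUIRE, so that accesses inside the critical section cannot be reordered before the point where ownership is obtained, and it pairs with the RELEASE performed by the previous owner's unlock. Below is a minimal C11 userspace sketch of that acquire/release pairing, illustrative only and not kernel code; in the kernel the release on the clear-owner path is provided by the wait_lock unlock, which is why WRITE_ONCE() suffices there, whereas this sketch issues an explicit release store.

#include <pthread.h>
#include <stdatomic.h>
#include <stdio.h>

static _Atomic(void *) owner;  /* stands in for lock->owner */
static int protected_data;     /* data the "lock" protects  */

static void *new_owner(void *self)
{
	void *expected = NULL;

	/*
	 * Claim the lock with ACQUIRE semantics, as xchg_acquire() does in
	 * rt_mutex_set_owner(): no later access may be reordered before
	 * the successful exchange.
	 */
	while (!atomic_compare_exchange_weak_explicit(&owner, &expected, self,
						      memory_order_acquire,
						      memory_order_relaxed))
		expected = NULL;

	/* Pairs with the release store in main(): this must print 42. */
	printf("new owner sees %d\n", protected_data);
	return NULL;
}

int main(void)
{
	pthread_t t;

	atomic_store_explicit(&owner, &t, memory_order_relaxed); /* "held" */
	pthread_create(&t, NULL, new_owner, &t);

	protected_data = 42;                 /* critical-section write */
	atomic_store_explicit(&owner, NULL,  /* unlock: the RELEASE     */
			      memory_order_release); /* publishes the write */

	pthread_join(t, NULL);
	return 0;
}

Compile with cc -pthread; the guarantee that the new owner observes 42 is exactly what the ACQUIRE in the slow path buys.
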
6 changes: 3 additions & 3 deletions kernel/locking/rtmutex_api.c
@@ -267,7 +267,7 @@ void __sched rt_mutex_init_proxy_locked(struct rt_mutex_base *lock,
 void __sched rt_mutex_proxy_unlock(struct rt_mutex_base *lock)
 {
 	debug_rt_mutex_proxy_unlock(lock);
-	rt_mutex_set_owner(lock, NULL);
+	rt_mutex_clear_owner(lock);
 }
 
 /**
@@ -382,7 +382,7 @@ int __sched rt_mutex_wait_proxy_lock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, true);
 	raw_spin_unlock_irq(&lock->wait_lock);
 
 	return ret;
@@ -438,7 +438,7 @@ bool __sched rt_mutex_cleanup_proxy_lock(struct rt_mutex_base *lock,
 	 * try_to_take_rt_mutex() sets the waiter bit unconditionally. We might
 	 * have to fix that up.
 	 */
-	fixup_rt_mutex_waiters(lock);
+	fixup_rt_mutex_waiters(lock, false);
 
 	raw_spin_unlock_irq(&lock->wait_lock);
 
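An aside on the helper these call sites depend on: rt_mutex_owner_encode() folds the RT_MUTEX_HAS_WAITERS flag into the low bit of the owner task pointer, so owner and waiter state can be updated with a single store or exchange. Below is a small standalone sketch of that pointer-tagging idea, using made-up names rather than the kernel's types.

#include <assert.h>
#include <stdint.h>
#include <stdio.h>

#define HAS_WAITERS ((uintptr_t)1)  /* mirrors RT_MUTEX_HAS_WAITERS */

struct task { const char *name; };

/* cf. rt_mutex_owner_encode(): fold the waiters bit into the pointer. */
static struct task *owner_encode(struct task *owner, int has_waiters)
{
	uintptr_t val = (uintptr_t)owner;

	if (has_waiters)
		val |= HAS_WAITERS;
	return (struct task *)val;
}

static struct task *owner_decode(struct task *encoded)
{
	return (struct task *)((uintptr_t)encoded & ~HAS_WAITERS);
}

int main(void)
{
	static struct task me = { "me" };  /* static => suitably aligned,
					      so the low bit is free */
	struct task *enc = owner_encode(&me, 1);

	assert(owner_decode(enc) == &me);
	printf("owner=%s waiters=%lu\n", owner_decode(enc)->name,
	       (unsigned long)((uintptr_t)enc & HAS_WAITERS));
	return 0;
}
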
