Skip to content

Commit

Permalink
Merge tag 'locking-urgent-2020-11-01' of git://git.kernel.org/pub/scm…
Browse files Browse the repository at this point in the history
…/linux/kernel/git/tip/tip

Pull locking fixes from Thomas Gleixner:
 "A couple of locking fixes:

   - Fix incorrect failure injection handling in the futex code

   - Prevent a preemption warning in lockdep when tracking
     local_irq_enable() and interrupts are already enabled

   - Remove more raw_cpu_read() usage from lockdep which causes state
     corruption on !X86 architectures.

   - Make the nr_unused_locks accounting in lockdep correct again"

* tag 'locking-urgent-2020-11-01' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  lockdep: Fix nr_unused_locks accounting
  locking/lockdep: Remove more raw_cpu_read() usage
  futex: Fix incorrect should_fail_futex() handling
  lockdep: Fix preemption WARN for spurious IRQ-enable
  • Loading branch information
Linus Torvalds committed Nov 1, 2020
2 parents 31f0200 + 1a39340 commit 8d99084
Show file tree
Hide file tree
Showing 2 changed files with 10 additions and 14 deletions.
4 changes: 3 additions & 1 deletion kernel/futex.c
Original file line number Diff line number Diff line change
Expand Up @@ -1503,8 +1503,10 @@ static int wake_futex_pi(u32 __user *uaddr, u32 uval, struct futex_pi_state *pi_
*/
newval = FUTEX_WAITERS | task_pid_vnr(new_owner);

if (unlikely(should_fail_futex(true)))
if (unlikely(should_fail_futex(true))) {
ret = -EFAULT;
goto out_unlock;
}

ret = cmpxchg_futex_value_locked(&curval, uaddr, uval, newval);
if (!ret && (curval != uval)) {
Expand Down
20 changes: 7 additions & 13 deletions kernel/locking/lockdep.c
Original file line number Diff line number Diff line change
Expand Up @@ -84,7 +84,7 @@ static inline bool lockdep_enabled(void)
if (!debug_locks)
return false;

if (raw_cpu_read(lockdep_recursion))
if (this_cpu_read(lockdep_recursion))
return false;

if (current->lockdep_recursion)
Expand Down Expand Up @@ -4057,7 +4057,7 @@ void lockdep_hardirqs_on_prepare(unsigned long ip)
if (unlikely(in_nmi()))
return;

if (unlikely(__this_cpu_read(lockdep_recursion)))
if (unlikely(this_cpu_read(lockdep_recursion)))
return;

if (unlikely(lockdep_hardirqs_enabled())) {
Expand Down Expand Up @@ -4126,7 +4126,7 @@ void noinstr lockdep_hardirqs_on(unsigned long ip)
goto skip_checks;
}

if (unlikely(__this_cpu_read(lockdep_recursion)))
if (unlikely(this_cpu_read(lockdep_recursion)))
return;

if (lockdep_hardirqs_enabled()) {
Expand Down Expand Up @@ -4396,26 +4396,20 @@ static int mark_lock(struct task_struct *curr, struct held_lock *this,
if (unlikely(hlock_class(this)->usage_mask & new_mask))
goto unlock;

if (!hlock_class(this)->usage_mask)
debug_atomic_dec(nr_unused_locks);

hlock_class(this)->usage_mask |= new_mask;

if (new_bit < LOCK_TRACE_STATES) {
if (!(hlock_class(this)->usage_traces[new_bit] = save_trace()))
return 0;
}

switch (new_bit) {
case 0 ... LOCK_USED-1:
if (new_bit < LOCK_USED) {
ret = mark_lock_irq(curr, this, new_bit);
if (!ret)
return 0;
break;

case LOCK_USED:
debug_atomic_dec(nr_unused_locks);
break;

default:
break;
}

unlock:
Expand Down

0 comments on commit 8d99084

Please sign in to comment.