Merge tag 'locking-urgent-2025-03-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip

Pull misc locking fixes and updates from Ingo Molnar:

 - Fix a locking self-test FAIL on PREEMPT_RT kernels

 - Fix nr_unused_locks accounting bug

 - Simplify the split-lock debugging feature's fast-path

* tag 'locking-urgent-2025-03-28' of git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip:
  locking/lockdep: Decrease nr_unused_locks if lock unused in zap_class()
  lockdep: Fix wait context check on softirq for PREEMPT_RT
  x86/split_lock: Simplify reenabling
Linus Torvalds committed Mar 30, 2025
2 parents aa918db + 495f53d commit b4c5c57
Showing 3 changed files with 45 additions and 11 deletions.
35 changes: 24 additions & 11 deletions arch/x86/kernel/cpu/bus_lock.c
@@ -200,6 +200,26 @@ static void __split_lock_reenable(struct work_struct *work)
  */
 static DEFINE_PER_CPU(struct delayed_work, sl_reenable);
 
+/*
+ * Per-CPU delayed_work can't be statically initialized properly because
+ * the struct address is unknown. Thus per-CPU delayed_work structures
+ * have to be initialized during kernel initialization and after calling
+ * setup_per_cpu_areas().
+ */
+static int __init setup_split_lock_delayed_work(void)
+{
+	unsigned int cpu;
+
+	for_each_possible_cpu(cpu) {
+		struct delayed_work *work = per_cpu_ptr(&sl_reenable, cpu);
+
+		INIT_DELAYED_WORK(work, __split_lock_reenable);
+	}
+
+	return 0;
+}
+pure_initcall(setup_split_lock_delayed_work);
+
 /*
  * If a CPU goes offline with pending delayed work to re-enable split lock
  * detection then the delayed work will be executed on some other CPU. That
@@ -219,15 +239,16 @@ static int splitlock_cpu_offline(unsigned int cpu)
 
 static void split_lock_warn(unsigned long ip)
 {
-	struct delayed_work *work = NULL;
+	struct delayed_work *work;
 	int cpu;
+	unsigned int saved_sld_mitigate = READ_ONCE(sysctl_sld_mitigate);
 
 	if (!current->reported_split_lock)
 		pr_warn_ratelimited("#AC: %s/%d took a split_lock trap at address: 0x%lx\n",
 				    current->comm, current->pid, ip);
 	current->reported_split_lock = 1;
 
-	if (sysctl_sld_mitigate) {
+	if (saved_sld_mitigate) {
 		/*
 		 * misery factor #1:
 		 * sleep 10ms before trying to execute split lock.
@@ -240,18 +261,10 @@ static void split_lock_warn(unsigned long ip)
 		 */
 		if (down_interruptible(&buslock_sem) == -EINTR)
 			return;
-		work = &sl_reenable_unlock;
 	}
 
 	cpu = get_cpu();
-
-	if (!work) {
-		work = this_cpu_ptr(&sl_reenable);
-		/* Deferred initialization of per-CPU struct */
-		if (!work->work.func)
-			INIT_DELAYED_WORK(work, __split_lock_reenable);
-	}
-
+	work = saved_sld_mitigate ? &sl_reenable_unlock : per_cpu_ptr(&sl_reenable, cpu);
 	schedule_delayed_work_on(cpu, work, 2);
 
 	/* Disable split lock detection on this CPU to make progress */
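
For context only, and not part of the commit: split_lock_warn() above is reached when a LOCK-prefixed instruction operates on data that crosses a cache-line boundary. Below is a minimal userspace sketch of such an access for x86 machines with split lock detection in "warn" mode; the buffer size, the 62-byte offset, and the use of the GCC/Clang __atomic builtin and C11 aligned_alloc() are illustrative assumptions, not taken from the kernel sources.

/*
 * Illustrative userspace sketch (not from this commit): do a locked
 * read-modify-write on a 4-byte value that straddles a 64-byte
 * cache-line boundary. On x86 with split_lock_detect=warn this is the
 * kind of access that lands in split_lock_warn() above.
 */
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>

int main(void)
{
	/* 64-byte aligned buffer, big enough to cross into a second line. */
	unsigned char *buf = aligned_alloc(64, 128);

	if (!buf)
		return 1;

	/* 4-byte operand starting 2 bytes before the second cache line. */
	uint32_t *split = (uint32_t *)(buf + 62);

	*split = 0;

	/* Atomic RMW -> LOCK-prefixed instruction spanning two cache lines. */
	__atomic_fetch_add(split, 1, __ATOMIC_SEQ_CST);

	printf("value after atomic add: %u\n", (unsigned int)*split);
	free(buf);
	return 0;
}

If the trap fires in warn mode, split_lock_warn() logs the ratelimited message, disables detection on that CPU to let the task make progress, and the delayed work scheduled above (2 jiffies later) re-enables it via __split_lock_reenable().
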
3 changes: 3 additions & 0 deletions kernel/locking/lockdep.c
@@ -6264,6 +6264,9 @@ static void zap_class(struct pending_free *pf, struct lock_class *class)
 	hlist_del_rcu(&class->hash_entry);
 	WRITE_ONCE(class->key, NULL);
 	WRITE_ONCE(class->name, NULL);
+	/* Class allocated but not used, -1 in nr_unused_locks */
+	if (class->usage_mask == 0)
+		debug_atomic_dec(nr_unused_locks);
 	nr_lock_classes--;
 	__clear_bit(class - lock_classes, lock_classes_in_use);
 	if (class - lock_classes == max_lock_class_idx)
18 changes: 18 additions & 0 deletions kernel/softirq.c
@@ -126,6 +126,18 @@ static DEFINE_PER_CPU(struct softirq_ctrl, softirq_ctrl) = {
 	.lock = INIT_LOCAL_LOCK(softirq_ctrl.lock),
 };
 
+#ifdef CONFIG_DEBUG_LOCK_ALLOC
+static struct lock_class_key bh_lock_key;
+struct lockdep_map bh_lock_map = {
+	.name = "local_bh",
+	.key = &bh_lock_key,
+	.wait_type_outer = LD_WAIT_FREE,
+	.wait_type_inner = LD_WAIT_CONFIG, /* PREEMPT_RT makes BH preemptible. */
+	.lock_type = LD_LOCK_PERCPU,
+};
+EXPORT_SYMBOL_GPL(bh_lock_map);
+#endif
+
 /**
  * local_bh_blocked() - Check for idle whether BH processing is blocked
  *
@@ -148,6 +160,8 @@ void __local_bh_disable_ip(unsigned long ip, unsigned int cnt)
 
 	WARN_ON_ONCE(in_hardirq());
 
+	lock_map_acquire_read(&bh_lock_map);
+
 	/* First entry of a task into a BH disabled section? */
 	if (!current->softirq_disable_cnt) {
 		if (preemptible()) {
@@ -211,6 +225,8 @@ void __local_bh_enable_ip(unsigned long ip, unsigned int cnt)
 	WARN_ON_ONCE(in_hardirq());
 	lockdep_assert_irqs_enabled();
 
+	lock_map_release(&bh_lock_map);
+
 	local_irq_save(flags);
 	curcnt = __this_cpu_read(softirq_ctrl.cnt);
 
@@ -261,6 +277,8 @@ static inline void ksoftirqd_run_begin(void)
 /* Counterpart to ksoftirqd_run_begin() */
 static inline void ksoftirqd_run_end(void)
 {
+	/* pairs with the lock_map_acquire_read() in ksoftirqd_run_begin() */
+	lock_map_release(&bh_lock_map);
 	__local_bh_enable(SOFTIRQ_OFFSET, true);
 	WARN_ON_ONCE(in_interrupt());
 	local_irq_enable();
