Skip to content

Commit

Permalink
watchdog/hardlockup/perf: Prevent CPU hotplug deadlock
Browse files Browse the repository at this point in the history
The following deadlock is possible in the watchdog hotplug code:

  cpus_write_lock()
    ...
      takedown_cpu()
        smpboot_park_threads()
          smpboot_park_thread()
            kthread_park()
              ->park() := watchdog_disable()
                watchdog_nmi_disable()
                  perf_event_release_kernel();
                    put_event()
                      _free_event()
                        ->destroy() := hw_perf_event_destroy()
                          x86_release_hardware()
                            release_ds_buffers()
                              get_online_cpus()

when a per-CPU watchdog perf event is destroyed and that drops the last
reference to the PMU hardware. The cleanup code there invokes
get_online_cpus(), which instantly deadlocks because the hotplug percpu
rwsem is already write locked.

To solve this add a deferring mechanism:

  cpus_write_lock()
			   kthread_park()
			    watchdog_nmi_disable(deferred)
			      perf_event_disable(event);
			      move_event_to_deferred(event);
			   ....
  cpus_write_unlock()
  cleanup_deferred_events()
    perf_event_release_kernel()

This is still properly serialized against concurrent hotplug via the
cpu_add_remove_lock, which is held by the task which initiated the hotplug
event.

This is also used to handle event destruction when the watchdog threads are
parked via other mechanisms than CPU hotplug.

Analyzed-by: Peter Zijlstra <peterz@infradead.org>

Reported-by: Borislav Petkov <bp@alien8.de>
Signed-off-by: Thomas Gleixner <tglx@linutronix.de>
Reviewed-by: Don Zickus <dzickus@redhat.com>
Cc: Andrew Morton <akpm@linux-foundation.org>
Cc: Chris Metcalf <cmetcalf@mellanox.com>
Cc: Linus Torvalds <torvalds@linux-foundation.org>
Cc: Nicholas Piggin <npiggin@gmail.com>
Cc: Peter Zijlstra <peterz@infradead.org>
Cc: Sebastian Siewior <bigeasy@linutronix.de>
Cc: Ulrich Obergfell <uobergfe@redhat.com>
Link: http://lkml.kernel.org/r/20170912194146.884469246@linutronix.de
Signed-off-by: Ingo Molnar <mingo@kernel.org>
  • Loading branch information
Thomas Gleixner authored and Ingo Molnar committed Sep 14, 2017
1 parent 20d853f commit 941154b
Show file tree
Hide file tree
Showing 4 changed files with 65 additions and 6 deletions.
6 changes: 6 additions & 0 deletions include/linux/nmi.h
Original file line number Diff line number Diff line change
Expand Up @@ -13,9 +13,11 @@
#ifdef CONFIG_LOCKUP_DETECTOR
void lockup_detector_init(void);
void lockup_detector_soft_poweroff(void);
void lockup_detector_cleanup(void);
#else
static inline void lockup_detector_init(void) { }
static inline void lockup_detector_soft_poweroff(void) { }
static inline void lockup_detector_cleanup(void) { }
#endif

#ifdef CONFIG_SOFTLOCKUP_DETECTOR
Expand Down Expand Up @@ -77,9 +79,13 @@ static inline void hardlockup_detector_disable(void) {}
extern void arch_touch_nmi_watchdog(void);
extern void hardlockup_detector_perf_stop(void);
extern void hardlockup_detector_perf_restart(void);
extern void hardlockup_detector_perf_disable(void);
extern void hardlockup_detector_perf_cleanup(void);
#else
static inline void hardlockup_detector_perf_stop(void) { }
static inline void hardlockup_detector_perf_restart(void) { }
static inline void hardlockup_detector_perf_disable(void) { }
static inline void hardlockup_detector_perf_cleanup(void) { }
#if !defined(CONFIG_HAVE_NMI_WATCHDOG)
static inline void arch_touch_nmi_watchdog(void) {}
#endif
Expand Down
6 changes: 6 additions & 0 deletions kernel/cpu.c
Original file line number Diff line number Diff line change
Expand Up @@ -24,6 +24,7 @@
#include <linux/lockdep.h>
#include <linux/tick.h>
#include <linux/irq.h>
#include <linux/nmi.h>
#include <linux/smpboot.h>
#include <linux/relay.h>
#include <linux/slab.h>
Expand Down Expand Up @@ -734,6 +735,11 @@ static int __ref _cpu_down(unsigned int cpu, int tasks_frozen,

out:
cpus_write_unlock();
/*
* Do post unplug cleanup. This is still protected against
* concurrent CPU hotplug via cpu_add_remove_lock.
*/
lockup_detector_cleanup();
return ret;
}

Expand Down
25 changes: 25 additions & 0 deletions kernel/watchdog.c
Original file line number Diff line number Diff line change
Expand Up @@ -109,8 +109,10 @@ int __weak watchdog_nmi_enable(unsigned int cpu)
{
return 0;
}

/*
 * Weak default implementation of the NMI watchdog disable path, used by
 * architectures that do not provide their own watchdog_nmi_disable().
 */
void __weak watchdog_nmi_disable(unsigned int cpu)
{
	/*
	 * Only disables and defers the local perf event; the actual
	 * perf_event_release_kernel() happens later from
	 * lockup_detector_cleanup(), because this path can run with the
	 * CPU hotplug rwsem write locked (see kthread_park() chain).
	 */
	hardlockup_detector_perf_disable();
}

/*
Expand Down Expand Up @@ -193,6 +195,8 @@ __setup("hardlockup_all_cpu_backtrace=", hardlockup_all_cpu_backtrace_setup);
#endif
#endif

static void __lockup_detector_cleanup(void);

/*
* Hard-lockup warnings should be triggered after just a few seconds. Soft-
* lockups can have false positives under extreme conditions. So we generally
Expand Down Expand Up @@ -631,6 +635,24 @@ static void set_sample_period(void)
}
#endif /* SOFTLOCKUP */

/*
 * Release the deferred (dead) perf events.
 *
 * Caller must hold watchdog_mutex; enforced via lockdep. Shared by
 * lockup_detector_cleanup() and the proc sysctl update paths.
 */
static void __lockup_detector_cleanup(void)
{
	lockdep_assert_held(&watchdog_mutex);
	hardlockup_detector_perf_cleanup();
}

/**
 * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
 *
 * Caller must not hold the cpu hotplug rwsem.
 *
 * Releases the perf events which the disable path parked on the deferred
 * list. Serialized against concurrent watchdog reconfiguration via
 * watchdog_mutex; serialization against concurrent hotplug is provided
 * by the caller holding cpu_add_remove_lock.
 */
void lockup_detector_cleanup(void)
{
	mutex_lock(&watchdog_mutex);
	__lockup_detector_cleanup();
	mutex_unlock(&watchdog_mutex);
}

/**
* lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
*
Expand Down Expand Up @@ -665,6 +687,8 @@ static int proc_watchdog_update(void)

watchdog_nmi_reconfigure();

__lockup_detector_cleanup();

return err;

}
Expand Down Expand Up @@ -837,6 +861,7 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
}

watchdog_nmi_reconfigure();
__lockup_detector_cleanup();
}

mutex_unlock(&watchdog_mutex);
Expand Down
34 changes: 28 additions & 6 deletions kernel/watchdog_hld.c
Original file line number Diff line number Diff line change
Expand Up @@ -21,6 +21,8 @@
static DEFINE_PER_CPU(bool, hard_watchdog_warn);
static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
static DEFINE_PER_CPU(struct perf_event *, dead_event);
static struct cpumask dead_events_mask;

static unsigned long hardlockup_allcpu_dumped;
static bool hardlockup_detector_disabled;
Expand Down Expand Up @@ -239,23 +241,43 @@ int watchdog_nmi_enable(unsigned int cpu)
return 0;
}

void watchdog_nmi_disable(unsigned int cpu)
/**
* hardlockup_detector_perf_disable - Disable the local event
*/
void hardlockup_detector_perf_disable(void)
{
struct perf_event *event = per_cpu(watchdog_ev, cpu);
struct perf_event *event = this_cpu_read(watchdog_ev);

if (event) {
perf_event_disable(event);
per_cpu(watchdog_ev, cpu) = NULL;

/* should be in cleanup, but blocks oprofile */
perf_event_release_kernel(event);
this_cpu_write(watchdog_ev, NULL);
this_cpu_write(dead_event, event);
cpumask_set_cpu(smp_processor_id(), &dead_events_mask);

/* watchdog_nmi_enable() expects this to be zero initially. */
if (atomic_dec_and_test(&watchdog_cpus))
firstcpu_err = 0;
}
}

/**
 * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
 *
 * Called from lockup_detector_cleanup(). Serialized by the caller.
 *
 * Runs after cpus_write_unlock(), so perf_event_release_kernel() may
 * safely take get_online_cpus() without deadlocking on the hotplug rwsem.
 */
void hardlockup_detector_perf_cleanup(void)
{
	int cpu;

	for_each_cpu(cpu, &dead_events_mask) {
		struct perf_event *event = per_cpu(dead_event, cpu);

		/*
		 * Guard against a NULL event: on UP kernels for_each_cpu()
		 * reports CPU0 as set unconditionally, even when no event
		 * was ever parked there.
		 */
		if (event)
			perf_event_release_kernel(event);
		per_cpu(dead_event, cpu) = NULL;
	}
	cpumask_clear(&dead_events_mask);
}

/**
* hardlockup_detector_perf_stop - Globally stop watchdog events
*
Expand Down

0 comments on commit 941154b

Please sign in to comment.