Re: [patch 10/29] lockup_detector/perf: Prevent cpu hotplug deadlock
From: Don Zickus
Date: Fri Sep 01 2017 - 15:02:17 EST
On Thu, Aug 31, 2017 at 09:16:08AM +0200, Thomas Gleixner wrote:
> The following deadlock is possible in the watchdog hotplug code:
>
> cpus_write_lock()
> ...
> takedown_cpu()
> smpboot_park_threads()
> smpboot_park_thread()
> kthread_park()
> ->park() := watchdog_disable()
> watchdog_nmi_disable()
> perf_event_release_kernel();
> put_event()
> _free_event()
> ->destroy() := hw_perf_event_destroy()
> x86_release_hardware()
> release_ds_buffers()
> get_online_cpus()
>
> when a per cpu watchdog perf event is destroyed which drops the last
> reference to the PMU hardware. The cleanup code there invokes
> get_online_cpus() which instantly deadlocks because the hotplug percpu
> rwsem is write locked.
The main reason perf_event_release_kernel is in this path is that the
oprofile folks complained they couldn't use the perf counters when the
nmi_watchdog was disabled on the command line.
I believe your patches only release the perf event on cpu unplug now.
Unless oprofile has been updated, this may still cause issues. But
detecting this on sysctl changes can probably work around this.
Cheers,
Don
>
> To solve this add a deferring mechanism:
>
> cpus_write_lock()
> kthread_park()
> watchdog_nmi_disable(deferred)
> perf_event_disable(event);
> move_event_to_deferred(event);
> ....
> cpus_write_unlock()
> cleanup_deferred_events()
> perf_event_release_kernel()
>
> This is still properly serialized against concurrent hotplug via the
> cpu_add_remove_lock, which is held by the task which initiated the hotplug
> event.
>
> This is also used to handle event destruction when the watchdog threads are
> parked via other mechanisms than CPU hotplug.
>
> Reported-by: Borislav Petkov <bp@xxxxxxxxx>
> Analyzed-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
> Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
> ---
> include/linux/nmi.h | 6 ++++++
> kernel/cpu.c | 6 ++++++
> kernel/watchdog.c | 24 ++++++++++++++++++++++++
> kernel/watchdog_hld.c | 34 ++++++++++++++++++++++++++++------
> 4 files changed, 64 insertions(+), 6 deletions(-)
>
> --- a/include/linux/nmi.h
> +++ b/include/linux/nmi.h
> @@ -13,9 +13,11 @@
> #ifdef CONFIG_LOCKUP_DETECTOR
> void lockup_detector_init(void);
> void lockup_detector_soft_poweroff(void);
> +void lockup_detector_cleanup(void);
> #else
> static inline void lockup_detector_init(void) { }
> static inline void lockup_detector_soft_poweroff(void) { }
> +static inline void lockup_detector_cleanup(void) { }
> #endif
>
> #ifdef CONFIG_SOFTLOCKUP_DETECTOR
> @@ -77,9 +79,13 @@ static inline void hardlockup_detector_d
> extern void arch_touch_nmi_watchdog(void);
> extern void hardlockup_detector_perf_stop(void);
> extern void hardlockup_detector_perf_restart(void);
> +extern void hardlockup_detector_perf_disable(void);
> +extern void hardlockup_detector_perf_cleanup(void);
> #else
> static inline void hardlockup_detector_perf_stop(void) { }
> static inline void hardlockup_detector_perf_restart(void) { }
> +static inline void hardlockup_detector_perf_disable(void) { }
> +static inline void hardlockup_detector_perf_cleanup(void) { }
> #if !defined(CONFIG_HAVE_NMI_WATCHDOG)
> static inline void arch_touch_nmi_watchdog(void) {}
> #endif
> --- a/kernel/cpu.c
> +++ b/kernel/cpu.c
> @@ -24,6 +24,7 @@
> #include <linux/lockdep.h>
> #include <linux/tick.h>
> #include <linux/irq.h>
> +#include <linux/nmi.h>
> #include <linux/smpboot.h>
> #include <linux/relay.h>
> #include <linux/slab.h>
> @@ -733,6 +734,11 @@ static int __ref _cpu_down(unsigned int
>
> out:
> cpus_write_unlock();
> + /*
> + * Do post unplug cleanup. This is still protected against
> + * concurrent CPU hotplug via cpu_add_remove_lock.
> + */
> + lockup_detector_cleanup();
> return ret;
> }
>
> --- a/kernel/watchdog.c
> +++ b/kernel/watchdog.c
> @@ -193,6 +193,8 @@ static int __init hardlockup_all_cpu_bac
> #endif
> #endif
>
> +static void __lockup_detector_cleanup(void);
> +
> /*
> * Hard-lockup warnings should be triggered after just a few seconds. Soft-
> * lockups can have false positives under extreme conditions. So we generally
> @@ -459,6 +461,7 @@ static void watchdog_disable(unsigned in
> hrtimer_cancel(hrtimer);
> /* disable the perf event */
> watchdog_nmi_disable(cpu);
> + hardlockup_detector_perf_disable();
> }
>
> static void watchdog_cleanup(unsigned int cpu, bool online)
> @@ -631,6 +634,24 @@ static void set_sample_period(void)
> }
> #endif /* SOFTLOCKUP */
>
> +static void __lockup_detector_cleanup(void)
> +{
> + lockdep_assert_held(&watchdog_mutex);
> + hardlockup_detector_perf_cleanup();
> +}
> +
> +/**
> + * lockup_detector_cleanup - Cleanup after cpu hotplug or sysctl changes
> + *
> + * Caller must not hold the cpu hotplug rwsem.
> + */
> +void lockup_detector_cleanup(void)
> +{
> + mutex_lock(&watchdog_mutex);
> + __lockup_detector_cleanup();
> + mutex_unlock(&watchdog_mutex);
> +}
> +
> /**
> * lockup_detector_soft_poweroff - Interface to stop lockup detector(s)
> *
> @@ -665,6 +686,8 @@ static int proc_watchdog_update(void)
>
> watchdog_nmi_reconfigure();
>
> + __lockup_detector_cleanup();
> +
> return err;
>
> }
> @@ -837,6 +860,7 @@ int proc_watchdog_cpumask(struct ctl_tab
> }
>
> watchdog_nmi_reconfigure();
> + __lockup_detector_cleanup();
> }
>
> mutex_unlock(&watchdog_mutex);
> --- a/kernel/watchdog_hld.c
> +++ b/kernel/watchdog_hld.c
> @@ -21,6 +21,8 @@
> static DEFINE_PER_CPU(bool, hard_watchdog_warn);
> static DEFINE_PER_CPU(bool, watchdog_nmi_touch);
> static DEFINE_PER_CPU(struct perf_event *, watchdog_ev);
> +static DEFINE_PER_CPU(struct perf_event *, dead_event);
> +static struct cpumask dead_events_mask;
>
> static unsigned long hardlockup_allcpu_dumped;
> static bool hardlockup_detector_disabled;
> @@ -239,16 +241,18 @@ int watchdog_nmi_enable(unsigned int cpu
> return 0;
> }
>
> -void watchdog_nmi_disable(unsigned int cpu)
> +/**
> + * hardlockup_detector_perf_disable - Disable the local event
> + */
> +void hardlockup_detector_perf_disable(void)
> {
> - struct perf_event *event = per_cpu(watchdog_ev, cpu);
> + struct perf_event *event = this_cpu_read(watchdog_ev);
>
> if (event) {
> perf_event_disable(event);
> - per_cpu(watchdog_ev, cpu) = NULL;
> -
> - /* should be in cleanup, but blocks oprofile */
> - perf_event_release_kernel(event);
> + this_cpu_write(watchdog_ev, NULL);
> + this_cpu_write(dead_event, event);
> + cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
>
> /* watchdog_nmi_enable() expects this to be zero initially. */
> if (atomic_dec_and_test(&watchdog_cpus))
> @@ -257,6 +261,24 @@ void watchdog_nmi_disable(unsigned int c
> }
>
> /**
> + * hardlockup_detector_perf_cleanup - Cleanup disabled events and destroy them
> + *
> + * Called from lockup_detector_cleanup(). Serialized by the caller.
> + */
> +void hardlockup_detector_perf_cleanup(void)
> +{
> + int cpu;
> +
> + for_each_cpu(cpu, &dead_events_mask) {
> + struct perf_event *event = per_cpu(dead_event, cpu);
> +
> + per_cpu(dead_event, cpu) = NULL;
> + perf_event_release_kernel(event);
> + }
> + cpumask_clear(&dead_events_mask);
> +}
> +
> +/**
> * hardlockup_detector_perf_stop - Globally stop watchdog events
> *
> * Special interface for x86 to handle the perf HT bug.
>
>