[patch V2 27/29] lockup_detector: Use new perf CPU enable mechanism
From: Thomas Gleixner
Date: Tue Sep 12 2017 - 15:51:06 EST
Get rid of the hodgepodge which tries to be smart about perf being
unavailable and about rate limiting the error printouts. None of that
is required anymore because this code path is only invoked when the
perf NMI watchdog is functional.
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Don Zickus <dzickus@xxxxxxxxxx>
Cc: Chris Metcalf <cmetcalf@xxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Sebastian Siewior <bigeasy@xxxxxxxxxxxxx>
Cc: Nicholas Piggin <npiggin@xxxxxxxxx>
Cc: Ulrich Obergfell <uobergfe@xxxxxxxxxx>
Cc: Borislav Petkov <bp@xxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Link: http://lkml.kernel.org/r/20170831073055.173111470@xxxxxxxxxxxxx
---
kernel/watchdog.c | 4 +-
kernel/watchdog_hld.c | 88 ++------------------------------------------------
2 files changed, 8 insertions(+), 84 deletions(-)
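[ Not part of the patch: a minimal, self-contained sketch of the counted
  enable/disable scheme the hunks below switch to. This is a simplified
  user-space model, not the kernel code; the per-CPU perf event handling
  in hardlockup_detector_event_create() is deliberately left out and
  printf() stands in for pr_info(). ]

  #include <stdio.h>

  static unsigned int watchdog_cpus;

  /* Only ever called when the perf NMI watchdog is functional. */
  static void hardlockup_detector_perf_enable(void)
  {
          /* Print the PMU consumption notice once, for the first CPU only. */
          if (!watchdog_cpus++)
                  printf("Enabled. Permanently consumes one hw-PMU counter.\n");
  }

  static void hardlockup_detector_perf_disable(void)
  {
          watchdog_cpus--;
  }

  int main(void)
  {
          int cpu;

          /* Bringing four CPUs online prints the notice exactly once. */
          for (cpu = 0; cpu < 4; cpu++)
                  hardlockup_detector_perf_enable();
          for (cpu = 0; cpu < 4; cpu++)
                  hardlockup_detector_perf_disable();
          return 0;
  }

[ Compare with the removed watchdog_nmi_enable() below, which needed an
  atomic counter plus firstcpu_err bookkeeping to get the same
  print-once behaviour. ]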
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -107,6 +107,7 @@ static int __init hardlockup_all_cpu_bac
*/
int __weak watchdog_nmi_enable(unsigned int cpu)
{
+ hardlockup_detector_perf_enable();
return 0;
}
@@ -465,7 +466,8 @@ static void watchdog_enable(unsigned int
/* Initialize timestamp */
__touch_watchdog();
/* Enable the perf event */
- watchdog_nmi_enable(cpu);
+ if (watchdog_enabled & NMI_WATCHDOG_ENABLED)
+         watchdog_nmi_enable(cpu);
watchdog_set_prio(SCHED_FIFO, MAX_RT_PRIO - 1);
}
--- a/kernel/watchdog_hld.c
+++ b/kernel/watchdog_hld.c
@@ -25,7 +25,7 @@ static DEFINE_PER_CPU(struct perf_event
static struct cpumask dead_events_mask;
static unsigned long hardlockup_allcpu_dumped;
-static bool hardlockup_detector_disabled;
+static unsigned int watchdog_cpus;
void arch_touch_nmi_watchdog(void)
{
@@ -160,84 +160,6 @@ static void watchdog_overflow_callback(s
return;
}
-/*
- * People like the simple clean cpu node info on boot.
- * Reduce the watchdog noise by only printing messages
- * that are different from what cpu0 displayed.
- */
-static unsigned long firstcpu_err;
-static atomic_t watchdog_cpus;
-
-int watchdog_nmi_enable(unsigned int cpu)
-{
- struct perf_event_attr *wd_attr;
- struct perf_event *event = per_cpu(watchdog_ev, cpu);
- int firstcpu = 0;
-
- /* nothing to do if the hard lockup detector is disabled */
- if (!(watchdog_enabled & NMI_WATCHDOG_ENABLED))
- goto out;
-
- /* A failure disabled the hardlockup detector permanently */
- if (hardlockup_detector_disabled)
- return -ENODEV;
-
- /* is it already setup and enabled? */
- if (event && event->state > PERF_EVENT_STATE_OFF)
- goto out;
-
- /* it is setup but not enabled */
- if (event != NULL)
- goto out_enable;
-
- if (atomic_inc_return(&watchdog_cpus) == 1)
- firstcpu = 1;
-
- wd_attr = &wd_hw_attr;
- wd_attr->sample_period = hw_nmi_get_sample_period(watchdog_thresh);
-
- /* Try to register using hardware perf events */
- event = perf_event_create_kernel_counter(wd_attr, cpu, NULL, watchdog_overflow_callback, NULL);
-
- /* save the first cpu's error for future comparision */
- if (firstcpu && IS_ERR(event))
- firstcpu_err = PTR_ERR(event);
-
- if (!IS_ERR(event)) {
- /* only print for the first cpu initialized */
- if (firstcpu || firstcpu_err)
- pr_info("enabled on all CPUs, permanently consumes one hw-PMU counter.\n");
- goto out_save;
- }
-
- /* skip displaying the same error again */
- if (!firstcpu && (PTR_ERR(event) == firstcpu_err))
- return PTR_ERR(event);
-
- /* vary the KERN level based on the returned errno */
- if (PTR_ERR(event) == -EOPNOTSUPP)
- pr_info("disabled (cpu%i): not supported (no LAPIC?)\n", cpu);
- else if (PTR_ERR(event) == -ENOENT)
- pr_warn("disabled (cpu%i): hardware events not enabled\n",
- cpu);
- else
- pr_err("disabled (cpu%i): unable to create perf event: %ld\n",
- cpu, PTR_ERR(event));
-
- pr_info("Disabling hard lockup detector permanently\n");
- hardlockup_detector_disabled = true;
-
- return PTR_ERR(event);
-
- /* success path */
-out_save:
- per_cpu(watchdog_ev, cpu) = event;
-out_enable:
- perf_event_enable(per_cpu(watchdog_ev, cpu));
-out:
- return 0;
-}
-
static int hardlockup_detector_event_create(void)
{
unsigned int cpu = smp_processor_id();
@@ -267,6 +189,9 @@ void hardlockup_detector_perf_enable(voi
if (hardlockup_detector_event_create())
return;
+ if (!watchdog_cpus++)
+         pr_info("Enabled. Permanently consumes one hw-PMU counter.\n");
+
perf_event_enable(this_cpu_read(watchdog_ev));
}
@@ -282,10 +207,7 @@ void hardlockup_detector_perf_disable(vo
this_cpu_write(watchdog_ev, NULL);
this_cpu_write(dead_event, event);
cpumask_set_cpu(smp_processor_id(), &dead_events_mask);
-
- /* watchdog_nmi_enable() expects this to be zero initially. */
- if (atomic_dec_and_test(&watchdog_cpus))
- firstcpu_err = 0;
+ watchdog_cpus--;
}
}