[PATCHv2 3/4] kernel/watchdog: adapt the watchdog_hld interface for async model

From: Pingfan Liu
Date: Thu Sep 23 2021 - 10:10:42 EST


When lockup_detector_init() calls watchdog_nmi_probe(), the PMU may not
be ready yet. E.g. on arm64, the PMU is not ready until
device_initcall(armv8_pmu_driver_init), and the PMU driver is deeply
integrated with the driver model and cpuhp, so it is hard to move its
initialization before smp_init().

But it is easy to take the opposite approach and let watchdog_hld pick
up the PMU capability asynchronously.

The async model is achieved by extending watchdog_nmi_probe() with an
-EBUSY return code, plus a re-initializing work_struct that waits on a
wait_queue_head until the arch code signals that the PMU is ready.
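
For illustration only (the arch-side wiring is not part of this patch):
once the PMU driver has finished probing, the arch code is expected to
set hld_detector_delay_initialized and wake hld_detector_wait, so that
the worker retries watchdog_nmi_probe(). Both symbols are __initdata,
so this has to happen during boot. A rough sketch, with
arch_hld_pmu_ready() as a hypothetical helper name:

	#include <linux/nmi.h>
	#include <linux/wait.h>

	/* Called by the arch PMU driver once perf events are usable. */
	static void __init arch_hld_pmu_ready(void)
	{
		/* Let lockup_detector_delay_init() retry the probe. */
		hld_detector_delay_initialized = true;
		wake_up(&hld_detector_wait);
	}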

Signed-off-by: Pingfan Liu <kernelfans@xxxxxxxxx>
Cc: Sumit Garg <sumit.garg@xxxxxxxxxx>
Cc: Catalin Marinas <catalin.marinas@xxxxxxx>
Cc: Will Deacon <will@xxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Alexander Shishkin <alexander.shishkin@xxxxxxxxxxxxxxx>
Cc: Jiri Olsa <jolsa@xxxxxxxxxx>
Cc: Namhyung Kim <namhyung@xxxxxxxxxx>
Cc: Marc Zyngier <maz@xxxxxxxxxx>
Cc: Kees Cook <keescook@xxxxxxxxxxxx>
Cc: Masahiro Yamada <masahiroy@xxxxxxxxxx>
Cc: Sami Tolvanen <samitolvanen@xxxxxxxxxx>
Cc: Petr Mladek <pmladek@xxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Wang Qing <wangqing@xxxxxxxx>
Cc: "Peter Zijlstra (Intel)" <peterz@xxxxxxxxxxxxx>
Cc: Santosh Sivaraj <santosh@xxxxxxxxxx>
Cc: linux-arm-kernel@xxxxxxxxxxxxxxxxxxx
To: linux-kernel@xxxxxxxxxxxxxxx
---
include/linux/nmi.h | 3 +++
kernel/watchdog.c | 37 +++++++++++++++++++++++++++++++++++--
2 files changed, 38 insertions(+), 2 deletions(-)

diff --git a/include/linux/nmi.h b/include/linux/nmi.h
index b7bcd63c36b4..270d440fe4b7 100644
--- a/include/linux/nmi.h
+++ b/include/linux/nmi.h
@@ -118,6 +118,9 @@ static inline int hardlockup_detector_perf_init(void) { return 0; }

 void watchdog_nmi_stop(void);
 void watchdog_nmi_start(void);
+
+extern bool hld_detector_delay_initialized;
+extern struct wait_queue_head hld_detector_wait;
 int watchdog_nmi_probe(void);
 void watchdog_nmi_enable(unsigned int cpu);
 void watchdog_nmi_disable(unsigned int cpu);
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 6e6dd5f0bc3e..bd4ae1839b72 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -103,7 +103,10 @@ void __weak watchdog_nmi_disable(unsigned int cpu)
 	hardlockup_detector_perf_disable();
 }
 
-/* Return 0, if a NMI watchdog is available. Error code otherwise */
+/*
+ * Return 0 if a NMI watchdog is available, -EBUSY if it is not ready yet,
+ * or any other negative value if it is not supported.
+ */
 int __weak __init watchdog_nmi_probe(void)
 {
 	return hardlockup_detector_perf_init();
@@ -739,15 +742,45 @@ int proc_watchdog_cpumask(struct ctl_table *table, int write,
 }
 #endif /* CONFIG_SYSCTL */
 
+static void lockup_detector_delay_init(struct work_struct *work);
+bool hld_detector_delay_initialized __initdata;
+
+struct wait_queue_head hld_detector_wait __initdata =
+		__WAIT_QUEUE_HEAD_INITIALIZER(hld_detector_wait);
+
+static struct work_struct detector_work __initdata =
+		__WORK_INITIALIZER(detector_work, lockup_detector_delay_init);
+
+static void __init lockup_detector_delay_init(struct work_struct *work)
+{
+	int ret;
+
+	wait_event(hld_detector_wait, hld_detector_delay_initialized);
+	ret = watchdog_nmi_probe();
+	if (!ret) {
+		nmi_watchdog_available = true;
+		lockup_detector_setup();
+	} else {
+		WARN_ON(ret == -EBUSY);
+		pr_info("Perf NMI watchdog permanently disabled\n");
+	}
+}
+
 void __init lockup_detector_init(void)
 {
+	int ret;
+
 	if (tick_nohz_full_enabled())
 		pr_info("Disabling watchdog on nohz_full cores by default\n");
 
 	cpumask_copy(&watchdog_cpumask,
 		     housekeeping_cpumask(HK_FLAG_TIMER));
 
-	if (!watchdog_nmi_probe())
+	ret = watchdog_nmi_probe();
+	if (!ret)
 		nmi_watchdog_available = true;
+	else if (ret == -EBUSY)
+		queue_work_on(smp_processor_id(), system_wq, &detector_work);
+
 	lockup_detector_setup();
 }
--
2.31.1