[PATCH v2 3/4] clocksource: Dynamically increase watchdog_max_skew
From: Waiman Long
Date: Tue Nov 16 2021 - 18:45:41 EST
A long-lasting, intensive workload running on a system can cause the
clock skew test to be skipped for an extended period of time. One way
to avoid this is to dynamically increase the watchdog_max_skew value
used in the clock skew test.
However, watchdog_max_skew should not be increased without bound, so
the increase is capped at 10*WATCHDOG_MAX_SKEW (1ms with the default
100us WATCHDOG_MAX_SKEW). If that cap would be exceeded, something is
wrong with the current watchdog itself, so mark it unstable and select
a new watchdog, if possible.
Signed-off-by: Waiman Long <longman@xxxxxxxxxx>
---
kernel/time/clocksource.c | 59 +++++++++++++++++++++++++++++++++++----
1 file changed, 53 insertions(+), 6 deletions(-)
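
For reviewers (not part of the commit message): a minimal standalone
userspace sketch of the escalation arithmetic, assuming the in-tree
default WATCHDOG_MAX_SKEW of 100us. Because watchdog_max_skew is raised
to 2 * wd_seq_delay, capping it at 10*WATCHDOG_MAX_SKEW is equivalent
to marking the watchdog unstable once a wd-wd read-back delay exceeds
5*WATCHDOG_MAX_SKEW (500us), which is the check the patch applies once
more than CLOCK_SKEW_SKIP_MAX tests have been skipped:

/* Illustration only -- mirrors the escalation policy, not kernel code. */
#include <stdio.h>

#define NSEC_PER_USEC		1000ULL
#define WATCHDOG_MAX_SKEW	(100 * NSEC_PER_USEC)	/* 100us default */

/*
 * Return 1 if the watchdog should be marked unstable, 0 if
 * watchdog_max_skew can be raised to 2 * wd_seq_delay without
 * exceeding the 10 * WATCHDOG_MAX_SKEW (1ms) cap.
 */
static int escalate(unsigned long long wd_seq_delay)
{
	return wd_seq_delay > 5 * WATCHDOG_MAX_SKEW;
}

int main(void)
{
	unsigned long long delays[] = {
		150 * NSEC_PER_USEC,	/* -> raise cap to 300us */
		450 * NSEC_PER_USEC,	/* -> raise cap to 900us */
		600 * NSEC_PER_USEC,	/* 2 * 600us > 1ms -> unstable */
	};
	int i;

	for (i = 0; i < 3; i++) {
		if (escalate(delays[i]))
			printf("wd-wd delay %lluns: mark watchdog unstable\n",
			       delays[i]);
		else
			printf("wd-wd delay %lluns: raise watchdog_max_skew to %lluns\n",
			       delays[i], 2 * delays[i]);
	}
	return 0;
}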
diff --git a/kernel/time/clocksource.c b/kernel/time/clocksource.c
index a7814b543a9b..b1813b09fe9b 100644
--- a/kernel/time/clocksource.c
+++ b/kernel/time/clocksource.c
@@ -108,6 +108,15 @@ static u64 suspend_start;
* a lower bound for cs->uncertainty_margin values when registering clocks.
*/
#define WATCHDOG_MAX_SKEW (100 * NSEC_PER_USEC)
+static u64 watchdog_max_skew = WATCHDOG_MAX_SKEW;
+
+/*
+ * The clock-skew check will be skipped if the watchdog shows too much
+ * read-back delay. To avoid indefinite test skips, watchdog_max_skew will be
+ * increased once more than CLOCK_SKEW_SKIP_MAX tests have been skipped.
+ */
+#define CLOCK_SKEW_SKIP_MAX 10
+static int clock_skew_skip;
#ifdef CONFIG_CLOCKSOURCE_WATCHDOG
static void clocksource_watchdog_work(struct work_struct *work);
@@ -205,6 +214,8 @@ EXPORT_SYMBOL_GPL(max_cswd_read_retries);
static int verify_n_cpus = 8;
module_param(verify_n_cpus, int, 0644);
+static void __clocksource_select_watchdog(bool fallback);
+
enum wd_read_status {
WD_READ_SUCCESS,
WD_READ_UNSTABLE,
@@ -228,7 +239,7 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
wd_delta = clocksource_delta(wd_end, *wdnow, watchdog->mask);
wd_delay = clocksource_cyc2ns(wd_delta, watchdog->mult,
watchdog->shift);
- if (wd_delay <= WATCHDOG_MAX_SKEW) {
+ if (wd_delay <= watchdog_max_skew) {
if (nretries > 1 || nretries >= max_cswd_read_retries) {
pr_warn("timekeeping watchdog on CPU%d: %s retried %d times before success\n",
smp_processor_id(), watchdog->name, nretries);
@@ -241,13 +252,13 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
* there is too much external interferences that cause
* significant delay in reading both clocksource and watchdog.
*
- * If consecutive WD read-back delay > WATCHDOG_MAX_SKEW/2,
+ * If consecutive WD read-back delay > watchdog_max_skew/2,
* report system busy, reinit the watchdog and skip the current
* watchdog test.
*/
wd_delta = clocksource_delta(wd_end2, wd_end, watchdog->mask);
wd_seq_delay = clocksource_cyc2ns(wd_delta, watchdog->mult, watchdog->shift);
- if (wd_seq_delay > WATCHDOG_MAX_SKEW/2)
+ if (wd_seq_delay > watchdog_max_skew/2)
goto skip_test;
}
@@ -260,6 +271,36 @@ static enum wd_read_status cs_watchdog_read(struct clocksource *cs, u64 *csnow,
smp_processor_id(), watchdog->name, wd_seq_delay);
pr_warn("wd-%s-wd read-back delay of %lldns, clock-skew test skipped!\n",
cs->name, wd_delay);
+ if (++clock_skew_skip > CLOCK_SKEW_SKIP_MAX) {
+ /*
+ * Increase watchdog_max_skew and watchdog->uncertainty_margin
+ * unless that would push it beyond 10*WATCHDOG_MAX_SKEW. In that
+ * case, the watchdog itself is marked unstable instead.
+ */
+ clock_skew_skip = 0;
+ if (wd_seq_delay > 5 * WATCHDOG_MAX_SKEW) {
+ const char *old_wd_name = watchdog->name;
+ unsigned long flags;
+
+ /*
+ * The consecutive watchdog read-back delay exceeds the
+ * limit, so mark this watchdog as unstable and select
+ * a new watchdog, if possible.
+ */
+ local_irq_save(flags);
+ __clocksource_unstable(watchdog);
+ __clocksource_select_watchdog(true);
+ local_irq_restore(flags);
+ pr_warn("timekeeping watchdog: old %s watchdog marked unstable, new %s watchdog selected\n",
+ old_wd_name, watchdog->name);
+ return WD_READ_SKIP;
+ }
+ watchdog_max_skew = 2 * wd_seq_delay;
+ if (wd_seq_delay > watchdog->uncertainty_margin)
+ watchdog->uncertainty_margin = wd_seq_delay;
+ pr_warn("timekeeping watchdog on CPU%d: watchdog_max_skew increased to %lldns\n",
+ smp_processor_id(), watchdog_max_skew);
+ }
return WD_READ_SKIP;
}
@@ -559,12 +600,10 @@ static void clocksource_enqueue_watchdog(struct clocksource *cs)
}
}
-static void clocksource_select_watchdog(bool fallback)
+static void __clocksource_select_watchdog(bool fallback)
{
struct clocksource *cs, *old_wd;
- unsigned long flags;
- spin_lock_irqsave(&watchdog_lock, flags);
/* save current watchdog */
old_wd = watchdog;
if (fallback)
@@ -593,6 +632,14 @@ static void clocksource_select_watchdog(bool fallback)
/* Check if the watchdog timer needs to be started. */
clocksource_start_watchdog();
+}
+
+static void clocksource_select_watchdog(bool fallback)
+{
+ unsigned long flags;
+
+ spin_lock_irqsave(&watchdog_lock, flags);
+ __clocksource_select_watchdog(fallback);
spin_unlock_irqrestore(&watchdog_lock, flags);
}
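
Note on the last two hunks (again, not part of the commit message): the
lockless __clocksource_select_watchdog() helper is split out so that
cs_watchdog_read(), which in this code path runs while
clocksource_watchdog() already holds watchdog_lock, can pick a
replacement watchdog without re-taking that lock. A generic sketch of
the locked-wrapper / __unlocked-helper idiom, with made-up names:

/* Illustration only -- userspace demo of the idiom, not kernel code. */
#include <pthread.h>
#include <stdio.h>

static pthread_mutex_t demo_lock = PTHREAD_MUTEX_INITIALIZER;
static int demo_state;

/* Caller must hold demo_lock. */
static void __demo_update(int val)
{
	demo_state = val;
	printf("state set to %d\n", demo_state);
}

/* Takes the lock itself, then delegates to the __helper. */
static void demo_update(int val)
{
	pthread_mutex_lock(&demo_lock);
	__demo_update(val);
	pthread_mutex_unlock(&demo_lock);
}

int main(void)
{
	demo_update(1);			/* ordinary, unlocked caller */

	pthread_mutex_lock(&demo_lock);	/* caller already holds the lock, */
	__demo_update(2);		/* so it must use the __helper */
	pthread_mutex_unlock(&demo_lock);
	return 0;
}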
--
2.27.0