[PATCH v2 1/1] sched: watchdog: Touch kernel watchdog with sched count

From: Xi Wang
Date: Tue Oct 20 2020 - 16:57:43 EST


The main purpose of the kernel watchdog is to test whether the scheduler
can still schedule tasks on a CPU. To reduce the latency / jitter caused
by periodically resetting the watchdog from thread context, we can
instead simply test whether pick_next_task can run. This is done by
forcing a resched and checking rq->sched_count. Compared to actually
resetting the watchdog from the cpu stop / migration threads, we lose
coverage of two steps: the migration thread actually getting picked, and
the context switch to the migration thread actually happening. Both
steps are unlikely to fail silently, so the change provides nearly the
same level of protection with less overhead.
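
To illustrate the mechanism, here is a minimal, simplified sketch of the
timer-side check (the kernel/watchdog.c hunk below is the authoritative
version; this only restates its logic):

        /* Sketch only: mirrors the watchdog_timer_fn() change below. */
        if (watchdog_touch_with_sched) {
                /* Force __schedule()/pick_next_task() to run soon. */
                set_tsk_need_resched(current);
                set_preempt_need_resched();

                /*
                 * If the per-CPU sched count moved since the last touch,
                 * the scheduler ran, so count it as a watchdog touch.
                 */
                if (sched_get_count(smp_processor_id()) !=
                    __this_cpu_read(watchdog_sched_prev)) {
                        __touch_watchdog();
                        return HRTIMER_RESTART;
                }
        }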

With this patch we can still switch back to the old method via the boot
option watchdog_touch_with_thread. However, the code for the old method
can be removed entirely in the future.
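
For example (using the __setup() name added in kernel/watchdog.c below),
booting with

        watchdog_touch_with_thread

on the kernel command line switches back to the stop_one_cpu() based
watchdog touch.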

Suggested-by: Paul Turner <pjt@xxxxxxxxxx>
Suggested-by: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Signed-off-by: Xi Wang <xii@xxxxxxxxxx>
---
include/linux/sched.h |  4 ++++
kernel/sched/core.c   | 23 ++++++++++++++++++++--
kernel/sched/sched.h  |  6 +++++-
kernel/watchdog.c     | 44 +++++++++++++++++++++++++++++++++++++------
4 files changed, 68 insertions(+), 9 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index d383cf09e78f..1e3bef9a9cdb 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1662,6 +1662,10 @@ extern int sched_setattr(struct task_struct *, const struct sched_attr *);
extern int sched_setattr_nocheck(struct task_struct *, const struct sched_attr *);
extern struct task_struct *idle_task(int cpu);

+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
+extern unsigned int sched_get_count(int cpu);
+#endif
+
/**
* is_idle_task - is the specified task an idle task?
* @p: the task in question.
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8160ab5263f8..378f0f36c402 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -4293,8 +4293,6 @@ static inline void schedule_debug(struct task_struct *prev, bool preempt)
rcu_sleep_check();

profile_hit(SCHED_PROFILING, __builtin_return_address(0));
-
- schedstat_inc(this_rq()->sched_count);
}

static void put_prev_task_balance(struct rq *rq, struct task_struct *prev,
@@ -4492,6 +4490,12 @@ static void __sched notrace __schedule(bool preempt)
clear_tsk_need_resched(prev);
clear_preempt_need_resched();

+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
+ this_rq()->sched_count++; /* sched count is also used by watchdog */
+#else
+ schedstat_inc(this_rq()->sched_count);
+#endif
+
if (likely(prev != next)) {
rq->nr_switches++;
/*
@@ -5117,6 +5121,21 @@ struct task_struct *idle_task(int cpu)
return cpu_rq(cpu)->idle;
}

+#ifdef CONFIG_SOFTLOCKUP_DETECTOR
+
+/**
+ * sched_get_count - get the sched count of a CPU.
+ * @cpu: the CPU in question.
+ *
+ * Return: sched count.
+ */
+unsigned int sched_get_count(int cpu)
+{
+ return cpu_rq(cpu)->sched_count;
+}
+
+#endif
+
/**
* find_process_by_pid - find a process with a matching PID value.
* @pid: the pid in question.
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 28709f6b0975..f23255981d52 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -959,6 +959,11 @@ struct rq {
u64 clock_pelt;
unsigned long lost_idle_time;

+#if defined(CONFIG_SCHEDSTATS) || defined(CONFIG_SOFTLOCKUP_DETECTOR)
+ /* Also used by the watchdog - no longer grouped with other sched stats */
+ unsigned int sched_count;
+#endif
+
atomic_t nr_iowait;

#ifdef CONFIG_MEMBARRIER
@@ -1036,7 +1041,6 @@ struct rq {
unsigned int yld_count;

/* schedule() stats */
- unsigned int sched_count;
unsigned int sched_goidle;

/* try_to_wake_up() stats */
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 5abb5b22ad13..df7f7e585502 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -170,6 +170,7 @@ static bool softlockup_initialized __read_mostly;
static u64 __read_mostly sample_period;

static DEFINE_PER_CPU(unsigned long, watchdog_touch_ts);
+static DEFINE_PER_CPU(unsigned int, watchdog_sched_prev);
static DEFINE_PER_CPU(struct hrtimer, watchdog_hrtimer);
static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
@@ -177,6 +178,12 @@ static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
static unsigned long soft_lockup_nmi_warn;

+/*
+ * Touch the watchdog if __schedule()/pick_next_task() can run, avoiding the
+ * actual context switch and its latency in most cases.
+ */
+int __read_mostly watchdog_touch_with_sched = 1;
+
static int __init nowatchdog_setup(char *str)
{
watchdog_user_enabled = 0;
@@ -198,6 +205,13 @@ static int __init watchdog_thresh_setup(char *str)
}
__setup("watchdog_thresh=", watchdog_thresh_setup);

+static int __init watchdog_touch_with_thread_setup(char *str)
+{
+ watchdog_touch_with_sched = 0;
+ return 1;
+}
+__setup("watchdog_touch_with_thread", watchdog_touch_with_thread_setup);
+
static void __lockup_detector_cleanup(void);

/*
@@ -239,6 +253,9 @@ static void set_sample_period(void)
static void __touch_watchdog(void)
{
__this_cpu_write(watchdog_touch_ts, get_timestamp());
+ if (watchdog_touch_with_sched)
+ __this_cpu_write(watchdog_sched_prev,
+ sched_get_count(smp_processor_id()));
}

/**
@@ -351,12 +368,14 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
/* kick the hardlockup detector */
watchdog_interrupt_count();

- /* kick the softlockup detector */
- if (completion_done(this_cpu_ptr(&softlockup_completion))) {
- reinit_completion(this_cpu_ptr(&softlockup_completion));
- stop_one_cpu_nowait(smp_processor_id(),
- softlockup_fn, NULL,
- this_cpu_ptr(&softlockup_stop_work));
+ if (!watchdog_touch_with_sched) {
+ /* kick the softlockup detector */
+ if (completion_done(this_cpu_ptr(&softlockup_completion))) {
+ reinit_completion(this_cpu_ptr(&softlockup_completion));
+ stop_one_cpu_nowait(smp_processor_id(),
+ softlockup_fn, NULL,
+ this_cpu_ptr(&softlockup_stop_work));
+ }
}

/* .. and repeat */
@@ -378,6 +397,19 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
return HRTIMER_RESTART;
}

+ if (watchdog_touch_with_sched) {
+ /* Trigger reschedule for the next round */
+ set_tsk_need_resched(current);
+ set_preempt_need_resched();
+ /* A sched_count increase in __schedule() counts as a watchdog touch */
+ if (sched_get_count(smp_processor_id()) -
+ __this_cpu_read(watchdog_sched_prev)) {
+ __touch_watchdog();
+ __this_cpu_write(soft_watchdog_warn, false);
+ return HRTIMER_RESTART;
+ }
+ }
+
/* check for a softlockup
* This is done by making sure a high priority task is
* being scheduled. The task touches the watchdog to
--
2.29.0.rc1.297.gfa9743e501-goog