[PATCH 3/3] kernel/watchdog: use soft lockup to detect irq flood
From: Pingfan Liu
Date: Tue Nov 17 2020 - 22:36:57 EST
When an irq flood happens, the interrupt handlers occupy all of the CPU time.
This results in a situation where a soft lockup can be observed, although it
is different from what the soft lockup detector was designed to catch.
To distinguish this situation, it is helpful to print the irq frequency
statistics along with the soft lockup warning, so that a potential irq flood
can be evaluated.

Thomas and Guilherme have suggested patches to suppress the offending irqs in
different situations [1][2], but that still looks like an open question for
the near future. For now, printing some hints for users is better than
nothing.
[1]: https://lore.kernel.org/lkml/87tuueftou.fsf@xxxxxxxxxxxxxxxxxxxxxxx/
[2]: https://lore.kernel.org/linux-pci/20181018183721.27467-1-gpiccoli@xxxxxxxxxxxxx/
Signed-off-by: Pingfan Liu <kernelfans@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Jisheng Zhang <Jisheng.Zhang@xxxxxxxxxxxxx>
Cc: "Peter Zijlstra (Intel)" <peterz@xxxxxxxxxxxxx>
Cc: Vlastimil Babka <vbabka@xxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: "Guilherme G. Piccoli" <gpiccoli@xxxxxxxxxxxxx>
Cc: Petr Mladek <pmladek@xxxxxxxx>
Cc: kexec@xxxxxxxxxxxxxxxxxxx
To: linux-kernel@xxxxxxxxxxxxxxx
---
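For illustration, here is a minimal userspace sketch of the accounting this
patch performs: snapshot a per-CPU interrupt counter when the softlockup
window starts, then report the delta and the per-second rate once a lockup
is detected. The helper names and the counter values are made up for the
example; in the kernel the data comes from kstat_this_cpu->irqs_sum (plus
the unused irq counter introduced earlier in this series).

#include <stdio.h>

static unsigned long last_irq_sum;

/* Called when the softlockup window (re)starts. */
static void snapshot_irqs(unsigned long cur_irq_sum)
{
	last_irq_sum = cur_irq_sum;
}

/* Called when a soft lockup lasting 'seconds' is detected. */
static void report_irq_rate(unsigned long cur_irq_sum, unsigned int seconds)
{
	unsigned long irq_sum = cur_irq_sum - last_irq_sum;

	printf("%lu irqs at rate: %lu / s\n", irq_sum, irq_sum / seconds);
}

int main(void)
{
	snapshot_irqs(1000);		/* window starts */
	report_irq_rate(2601000, 26);	/* lockup detected 26 seconds later */
	return 0;
}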
kernel/watchdog.c | 17 ++++++++++++++++-
1 file changed, 16 insertions(+), 1 deletion(-)
diff --git a/kernel/watchdog.c b/kernel/watchdog.c
index 1cc619a..a0ab2a8 100644
--- a/kernel/watchdog.c
+++ b/kernel/watchdog.c
@@ -23,6 +23,7 @@
#include <linux/sched/debug.h>
#include <linux/sched/isolation.h>
#include <linux/stop_machine.h>
+#include <linux/kernel_stat.h>
#include <asm/irq_regs.h>
#include <linux/kvm_para.h>
@@ -175,6 +176,9 @@ static DEFINE_PER_CPU(bool, softlockup_touch_sync);
static DEFINE_PER_CPU(bool, soft_watchdog_warn);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts);
static DEFINE_PER_CPU(unsigned long, hrtimer_interrupts_saved);
+static DEFINE_PER_CPU(unsigned long, last_irq_sum);
+static DEFINE_PER_CPU(unsigned long, last_unused_irq_sum);
+
static unsigned long soft_lockup_nmi_warn;
static int __init nowatchdog_setup(char *str)
@@ -353,6 +357,8 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
/* kick the softlockup detector */
if (completion_done(this_cpu_ptr(&softlockup_completion))) {
+ __this_cpu_write(last_irq_sum, kstat_this_cpu->irqs_sum);
+ __this_cpu_write(last_unused_irq_sum, kstat_this_cpu->unused_irqs_sum);
reinit_completion(this_cpu_ptr(&softlockup_completion));
stop_one_cpu_nowait(smp_processor_id(),
softlockup_fn, NULL,
@@ -386,6 +392,9 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
*/
duration = is_softlockup(touch_ts);
if (unlikely(duration)) {
+ unsigned long irq_sum, unused_irq_sum;
+ unsigned int seconds;
+
/*
* If a virtual machine is stopped by the host it can look to
* the watchdog like a soft lockup, check to see if the host
@@ -409,9 +418,15 @@ static enum hrtimer_restart watchdog_timer_fn(struct hrtimer *hrtimer)
}
}
+ irq_sum = kstat_this_cpu->irqs_sum - __this_cpu_read(last_irq_sum);
+ unused_irq_sum = kstat_this_cpu->unused_irqs_sum -
+ __this_cpu_read(last_unused_irq_sum);
+ seconds = (unsigned int)convert_seconds(duration);
pr_emerg("BUG: soft lockup - CPU#%d stuck for %us! [%s:%d]\n",
- smp_processor_id(), (unsigned int)convert_seconds(duration),
+ smp_processor_id(), seconds,
current->comm, task_pid_nr(current));
+ pr_emerg("%lu irqs at rate: %lu / s, %lu unused irq at rate: %lu / s\n",
+ irq_sum, irq_sum/seconds, unused_irq_sum, unused_irq_sum/seconds);
print_modules();
print_irqtrace_events(current);
if (regs)
--
2.7.5