[PATCH v5 5/6] cpu_isolated: add debug boot flag

From: Chris Metcalf
Date: Tue Jul 28 2015 - 15:51:02 EST


The new "cpu_isolated_debug" flag simplifies debugging
of CPU_ISOLATED kernels when processes are running in
PR_CPU_ISOLATED_ENABLE mode. Such processes should get no interrupts
from the kernel, and if they do, when this boot flag is specified
a kernel stack dump on the console is generated.
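
As a usage sketch (hedged: PR_SET_CPU_ISOLATED is assumed to be the
prctl added earlier in this series, and the cpu list below is just an
example), a task opts in with something like:

    #include <stdio.h>
    #include <sys/prctl.h>
    #include <linux/prctl.h>  /* PR_SET_CPU_ISOLATED etc., assumed from earlier patches in this series */

    int main(void)
    {
            /* Request cpu_isolated mode for the current task. */
            if (prctl(PR_SET_CPU_ISOLATED, PR_CPU_ISOLATED_ENABLE, 0, 0, 0) < 0)
                    perror("prctl");
            /* ... run the userspace-only workload here ... */
            return 0;
    }

on a kernel booted with, e.g.:

    nohz_full=1-3 cpu_isolated_debug

Any later kernel interrupt of such a task then logs "Interrupt
detected for cpu_isolated cpu N" plus a dump_stack() backtrace showing
where the interrupt was generated.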

It's possible to use ftrace simply to detect that a cpu_isolated
core has unexpectedly entered the kernel. What this boot flag adds
is better kernel diagnostics, e.g. reporting in the IPI-generating
code which remote core and context is preparing to deliver an
interrupt to a cpu_isolated core.
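
To sketch the hook placement (illustrative only; notify_remote_cpu()
is a made-up wrapper, and the real call sites are in the diff below),
the debug check simply runs right before the code that raises the
remote interrupt:

    #include <linux/smp.h>
    #include <linux/cpu_isolated.h>

    /* Hypothetical wrapper; mirrors the pattern used at the real call sites. */
    static void notify_remote_cpu(int cpu)
    {
            /* Dump a backtrace if @cpu is currently running a cpu_isolated task. */
            cpu_isolated_debug(cpu);
            arch_send_call_function_single_ipi(cpu);
    }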

It may be worth considering other ways to generate useful debugging
output rather than console spew, but for now that is simple and direct.

Signed-off-by: Chris Metcalf <cmetcalf@xxxxxxxxxx>
---
 Documentation/kernel-parameters.txt |  7 +++++++
 arch/tile/mm/homecache.c            |  5 ++++-
 include/linux/cpu_isolated.h        |  2 ++
 kernel/irq_work.c                   |  5 ++++-
 kernel/sched/core.c                 | 21 +++++++++++++++++++++
 kernel/signal.c                     |  5 +++++
 kernel/smp.c                        |  4 ++++
 kernel/softirq.c                    |  7 +++++++
 8 files changed, 54 insertions(+), 2 deletions(-)

diff --git a/Documentation/kernel-parameters.txt b/Documentation/kernel-parameters.txt
index 1d6f0459cd7b..940e4c9f1978 100644
--- a/Documentation/kernel-parameters.txt
+++ b/Documentation/kernel-parameters.txt
@@ -749,6 +749,13 @@ bytes respectively. Such letter suffixes can also be entirely omitted.
/proc/<pid>/coredump_filter.
See also Documentation/filesystems/proc.txt.

+ cpu_isolated_debug [KNL]
+ In kernels built with CONFIG_CPU_ISOLATED and booted
+ in nohz_full= mode, this setting will generate console
+ backtraces when the kernel is about to interrupt a
+ task that has requested PR_CPU_ISOLATED_ENABLE
+ and is running on a nohz_full core.
+
cpuidle.off=1 [CPU_IDLE]
disable the cpuidle sub-system

diff --git a/arch/tile/mm/homecache.c b/arch/tile/mm/homecache.c
index 40ca30a9fee3..fdef5e3d6396 100644
--- a/arch/tile/mm/homecache.c
+++ b/arch/tile/mm/homecache.c
@@ -31,6 +31,7 @@
#include <linux/smp.h>
#include <linux/module.h>
#include <linux/hugetlb.h>
+#include <linux/cpu_isolated.h>

#include <asm/page.h>
#include <asm/sections.h>
@@ -83,8 +84,10 @@ static void hv_flush_update(const struct cpumask *cache_cpumask,
* Don't bother to update atomically; losing a count
* here is not that critical.
*/
- for_each_cpu(cpu, &mask)
+ for_each_cpu(cpu, &mask) {
++per_cpu(irq_stat, cpu).irq_hv_flush_count;
+ cpu_isolated_debug(cpu);
+ }
}

/*
diff --git a/include/linux/cpu_isolated.h b/include/linux/cpu_isolated.h
index b0f1c2669b2f..4ea67d640be7 100644
--- a/include/linux/cpu_isolated.h
+++ b/include/linux/cpu_isolated.h
@@ -18,11 +18,13 @@ extern void cpu_isolated_enter(void);
extern void cpu_isolated_syscall(int nr);
extern void cpu_isolated_exception(void);
extern void cpu_isolated_wait(void);
+extern void cpu_isolated_debug(int cpu);
#else
static inline bool is_cpu_isolated(void) { return false; }
static inline void cpu_isolated_enter(void) { }
static inline void cpu_isolated_syscall(int nr) { }
static inline void cpu_isolated_exception(void) { }
+static inline void cpu_isolated_debug(int cpu) { }
#endif

static inline bool cpu_isolated_strict(void)
diff --git a/kernel/irq_work.c b/kernel/irq_work.c
index cbf9fb899d92..3c08a41f9898 100644
--- a/kernel/irq_work.c
+++ b/kernel/irq_work.c
@@ -17,6 +17,7 @@
#include <linux/cpu.h>
#include <linux/notifier.h>
#include <linux/smp.h>
+#include <linux/cpu_isolated.h>
#include <asm/processor.h>


@@ -75,8 +76,10 @@ bool irq_work_queue_on(struct irq_work *work, int cpu)
if (!irq_work_claim(work))
return false;

- if (llist_add(&work->llnode, &per_cpu(raised_list, cpu)))
+ if (llist_add(&work->llnode, &per_cpu(raised_list, cpu))) {
+ cpu_isolated_debug(cpu);
arch_send_call_function_single_ipi(cpu);
+ }

return true;
}
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 78b4bad10081..647671900497 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -74,6 +74,7 @@
#include <linux/binfmts.h>
#include <linux/context_tracking.h>
#include <linux/compiler.h>
+#include <linux/cpu_isolated.h>

#include <asm/switch_to.h>
#include <asm/tlb.h>
@@ -745,6 +746,26 @@ bool sched_can_stop_tick(void)
}
#endif /* CONFIG_NO_HZ_FULL */

+#ifdef CONFIG_CPU_ISOLATED
+/* Enable debugging of any interrupts of cpu_isolated cores. */
+static int cpu_isolated_debug_flag;
+static int __init cpu_isolated_debug_func(char *str)
+{
+ cpu_isolated_debug_flag = true;
+ return 1;
+}
+__setup("cpu_isolated_debug", cpu_isolated_debug_func);
+
+void cpu_isolated_debug(int cpu)
+{
+ if (cpu_isolated_debug_flag && tick_nohz_full_cpu(cpu) &&
+ (cpu_curr(cpu)->cpu_isolated_flags & PR_CPU_ISOLATED_ENABLE)) {
+ pr_err("Interrupt detected for cpu_isolated cpu %d\n", cpu);
+ dump_stack();
+ }
+}
+#endif
+
void sched_avg_update(struct rq *rq)
{
s64 period = sched_avg_period();
diff --git a/kernel/signal.c b/kernel/signal.c
index 836df8dac6cc..90ee460c2586 100644
--- a/kernel/signal.c
+++ b/kernel/signal.c
@@ -684,6 +684,11 @@ int dequeue_signal(struct task_struct *tsk, sigset_t *mask, siginfo_t *info)
*/
void signal_wake_up_state(struct task_struct *t, unsigned int state)
{
+#ifdef CONFIG_NO_HZ_FULL
+ /* If the task is being killed, don't complain about cpu_isolated. */
+ if (state & TASK_WAKEKILL)
+ t->cpu_isolated_flags = 0;
+#endif
set_tsk_thread_flag(t, TIF_SIGPENDING);
/*
* TASK_WAKEKILL also means wake it up in the stopped/traced/killable
diff --git a/kernel/smp.c b/kernel/smp.c
index 07854477c164..846e42a3daa3 100644
--- a/kernel/smp.c
+++ b/kernel/smp.c
@@ -14,6 +14,7 @@
#include <linux/smp.h>
#include <linux/cpu.h>
#include <linux/sched.h>
+#include <linux/cpu_isolated.h>

#include "smpboot.h"

@@ -178,6 +179,7 @@ static int generic_exec_single(int cpu, struct call_single_data *csd,
* locking and barrier primitives. Generic code isn't really
* equipped to do the right thing...
*/
+ cpu_isolated_debug(cpu);
if (llist_add(&csd->llist, &per_cpu(call_single_queue, cpu)))
arch_send_call_function_single_ipi(cpu);

@@ -457,6 +459,8 @@ void smp_call_function_many(const struct cpumask *mask,
}

/* Send a message to all CPUs in the map */
+ for_each_cpu(cpu, cfd->cpumask)
+ cpu_isolated_debug(cpu);
arch_send_call_function_ipi_mask(cfd->cpumask);

if (wait) {
diff --git a/kernel/softirq.c b/kernel/softirq.c
index 479e4436f787..456149a4a34f 100644
--- a/kernel/softirq.c
+++ b/kernel/softirq.c
@@ -24,8 +24,10 @@
#include <linux/ftrace.h>
#include <linux/smp.h>
#include <linux/smpboot.h>
+#include <linux/context_tracking.h>
#include <linux/tick.h>
#include <linux/irq.h>
+#include <linux/cpu_isolated.h>

#define CREATE_TRACE_POINTS
#include <trace/events/irq.h>
@@ -335,6 +337,11 @@ void irq_enter(void)
_local_bh_enable();
}

+ if (context_tracking_cpu_is_enabled() &&
+ context_tracking_in_user() &&
+ !in_interrupt())
+ cpu_isolated_debug(smp_processor_id());
+
__irq_enter();
}

--
2.1.2
