[PATCH 2/2] tracing: Add support for critical section events
From: Joel Fernandes
Date: Sun Sep 03 2017 - 04:51:26 EST
Critical section trace events mark the start and end of a critical
section. A trace viewer such as systrace can use them to graphically
display critical sections and correlate them with latencies and
scheduling issues.
A critical section begins when either preemption or interrupts are
turned off, and ends once both preemption and interrupts are turned
back on.
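For example (an illustrative sketch, not part of this patch), with the
events enabled through their tracefs entries (events/critical/), a
nested disable sequence emits exactly one start/stop pair for the
outermost critical section:

  local_irq_disable();  /* critical_start: first thing to disable */
  preempt_disable();    /* no event: already in a critical section */
  preempt_enable();     /* no event: interrupts are still disabled */
  local_irq_enable();   /* critical_stop: fully preemptible again */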
Cc: Steven Rostedt <rostedt@xxxxxxxxxxx>
Cc: kernel-team@xxxxxxxxxxx
Signed-off-by: Joel Fernandes <joelaf@xxxxxxxxxx>
---
include/linux/ftrace.h | 2 +-
include/linux/irqflags.h | 3 +-
include/trace/events/critical.h | 45 +++++++++++++++++++
kernel/trace/Kconfig | 5 +++
kernel/trace/Makefile | 1 +
kernel/trace/trace_irqsoff.c | 97 +++++++++++++++++++++++++++++++++++++++--
6 files changed, 147 insertions(+), 6 deletions(-)
create mode 100644 include/trace/events/critical.h
diff --git a/include/linux/ftrace.h b/include/linux/ftrace.h
index 6383115e9d2c..6020c473a198 100644
--- a/include/linux/ftrace.h
+++ b/include/linux/ftrace.h
@@ -742,7 +742,7 @@ static inline unsigned long get_lock_parent_ip(void)
static inline void time_hardirqs_off(unsigned long a0, unsigned long a1) { }
#endif
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_CRITICAL_SECTION_EVENTS)
extern void trace_preempt_on(unsigned long a0, unsigned long a1);
extern void trace_preempt_off(unsigned long a0, unsigned long a1);
#else
diff --git a/include/linux/irqflags.h b/include/linux/irqflags.h
index 9bc050bc81b2..79c372fe9dcb 100644
--- a/include/linux/irqflags.h
+++ b/include/linux/irqflags.h
@@ -61,7 +61,8 @@ do { \
#endif
#if defined(CONFIG_IRQSOFF_TRACER) || \
- defined(CONFIG_PREEMPT_TRACER)
+ defined(CONFIG_PREEMPT_TRACER) || \
+ defined(CONFIG_CRITICAL_SECTION_EVENTS)
extern void stop_critical_timings(void);
extern void start_critical_timings(void);
#else
diff --git a/include/trace/events/critical.h b/include/trace/events/critical.h
new file mode 100644
index 000000000000..bd07a6a50252
--- /dev/null
+++ b/include/trace/events/critical.h
@@ -0,0 +1,45 @@
+#ifdef CONFIG_CRITICAL_SECTION_EVENTS
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM critical
+
+#if !defined(_TRACE_CRITICAL_H) || defined(TRACE_HEADER_MULTI_READ)
+#define _TRACE_CRITICAL_H
+
+#include <linux/ktime.h>
+#include <linux/tracepoint.h>
+#include <linux/string.h>
+#include <asm/sections.h>
+
+DECLARE_EVENT_CLASS(critical_template,
+
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+
+ TP_ARGS(ip, parent_ip),
+
+ TP_STRUCT__entry(
+ __field(u32, caller_offs)
+ __field(u32, parent_offs)
+ ),
+
+ TP_fast_assign(
+ __entry->caller_offs = (u32)(ip - (unsigned long)_stext);
+ __entry->parent_offs = (u32)(parent_ip - (unsigned long)_stext);
+ ),
+
+ TP_printk("caller=%pF parent=%pF\n",
+ (void *)((unsigned long)(_stext) + __entry->caller_offs),
+ (void *)((unsigned long)(_stext) + __entry->parent_offs))
+);
+
+DEFINE_EVENT(critical_template, critical_start,
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+ TP_ARGS(ip, parent_ip));
+
+DEFINE_EVENT(critical_template, critical_stop,
+ TP_PROTO(unsigned long ip, unsigned long parent_ip),
+ TP_ARGS(ip, parent_ip));
+#endif /* _TRACE_CRITICAL_H */
+
+/* This part must be outside protection */
+#include <trace/define_trace.h>
+#endif
diff --git a/kernel/trace/Kconfig b/kernel/trace/Kconfig
index 434c840e2d82..c08b23fcae6d 100644
--- a/kernel/trace/Kconfig
+++ b/kernel/trace/Kconfig
@@ -209,6 +209,11 @@ config PREEMPT_TRACER
enabled. This option and the irqs-off timing option can be
used together or separately.)
+config CRITICAL_SECTION_EVENTS
+ bool "Trace critical sections as trace events"
+ select TRACE_IRQFLAGS
+ default n
+
config SCHED_TRACER
bool "Scheduling Latency Tracer"
select GENERIC_TRACER
diff --git a/kernel/trace/Makefile b/kernel/trace/Makefile
index 90f2701d92a7..265135e35b79 100644
--- a/kernel/trace/Makefile
+++ b/kernel/trace/Makefile
@@ -36,6 +36,7 @@ obj-$(CONFIG_CONTEXT_SWITCH_TRACER) += trace_sched_switch.o
obj-$(CONFIG_FUNCTION_TRACER) += trace_functions.o
obj-$(CONFIG_IRQSOFF_TRACER) += trace_irqsoff.o
obj-$(CONFIG_PREEMPT_TRACER) += trace_irqsoff.o
+obj-$(CONFIG_CRITICAL_SECTION_EVENTS) += trace_irqsoff.o
obj-$(CONFIG_SCHED_TRACER) += trace_sched_wakeup.o
obj-$(CONFIG_HWLAT_TRACER) += trace_hwlat.o
obj-$(CONFIG_NOP_TRACER) += trace_nop.o
diff --git a/kernel/trace/trace_irqsoff.c b/kernel/trace/trace_irqsoff.c
index edd52ee8b756..7f59f41fbdc0 100644
--- a/kernel/trace/trace_irqsoff.c
+++ b/kernel/trace/trace_irqsoff.c
@@ -13,9 +13,69 @@
#include <linux/uaccess.h>
#include <linux/module.h>
#include <linux/ftrace.h>
+#define CREATE_TRACE_POINTS
+#include <trace/events/critical.h>
#include "trace.h"
+#ifdef CONFIG_CRITICAL_SECTION_EVENTS
+static DEFINE_PER_CPU(int, tracing_events_cpu);
+
+/*
+ * Called when either preemption or interrupts are turned off
+ */
+static inline void
+start_critical_event(unsigned long ip, unsigned long parent_ip)
+{
+ if (this_cpu_read(tracing_events_cpu))
+ return;
+
+ this_cpu_write(tracing_events_cpu, 1);
+ trace_critical_start(ip, parent_ip);
+}
+
+/*
+ * Called when both preemption and interrupts are turned back on
+ */
+static inline void
+stop_critical_event(unsigned long ip, unsigned long parent_ip)
+{
+ if (!this_cpu_read(tracing_events_cpu))
+ return;
+
+ trace_critical_stop(ip, parent_ip);
+ this_cpu_write(tracing_events_cpu, 0);
+}
+
+static inline int
+__trace_critical_start_enabled(void)
+{
+ if (irqs_disabled() || preempt_count())
+ return 1;
+
+ return 0;
+}
+
+static inline int
+__trace_critical_stop_enabled(void)
+{
+ if ((irqs_disabled() && !preempt_count()) ||
+ (!irqs_disabled() && preempt_count()))
+ return 1;
+
+ return 0;
+}
+#else
+
+static inline void start_critical_event(unsigned long ip,
+ unsigned long pip) { }
+static inline void stop_critical_event(unsigned long ip,
+ unsigned long pip) { }
+
+static inline int __trace_critical_start_enabled(void) { return 0; }
+static inline int __trace_critical_stop_enabled(void) { return 0; }
+#endif
+
#if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_PREEMPT_TRACER)
static struct trace_array *irqsoff_trace __read_mostly;
static int tracer_enabled __read_mostly;
@@ -710,7 +770,7 @@ void time_hardirqs_off(unsigned long a0, unsigned long a1)
}
#endif
#else /* !CONFIG_PROVE_LOCKING */
-# ifdef CONFIG_IRQSOFF_TRACER
+# if defined(CONFIG_IRQSOFF_TRACER) || defined(CONFIG_CRITICAL_SECTION_EVENTS)
/*
* Stubs:
@@ -733,6 +793,9 @@ inline void print_irqtrace_events(struct task_struct *curr)
*/
void trace_hardirqs_on(void)
{
+ if (__trace_critical_stop_enabled())
+ stop_critical_event(CALLER_ADDR0, CALLER_ADDR1);
+
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -740,6 +803,9 @@ EXPORT_SYMBOL(trace_hardirqs_on);
void trace_hardirqs_off(void)
{
+ if (__trace_critical_start_enabled())
+ start_critical_event(CALLER_ADDR0, CALLER_ADDR1);
+
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -747,6 +813,9 @@ EXPORT_SYMBOL(trace_hardirqs_off);
__visible void trace_hardirqs_on_caller(unsigned long caller_addr)
{
+ if (__trace_critical_stop_enabled())
+ stop_critical_event(CALLER_ADDR0, caller_addr);
+
if (!preempt_trace() && irq_trace())
stop_critical_timing(CALLER_ADDR0, caller_addr);
}
@@ -754,32 +823,44 @@ EXPORT_SYMBOL(trace_hardirqs_on_caller);
__visible void trace_hardirqs_off_caller(unsigned long caller_addr)
{
+ if (__trace_critical_start_enabled())
+ start_critical_event(CALLER_ADDR0, caller_addr);
+
if (!preempt_trace() && irq_trace())
start_critical_timing(CALLER_ADDR0, caller_addr);
}
EXPORT_SYMBOL(trace_hardirqs_off_caller);
-#endif /* CONFIG_IRQSOFF_TRACER */
+#endif /* CONFIG_IRQSOFF_TRACER || CONFIG_CRITICAL_SECTION_EVENTS */
#endif /* CONFIG_PROVE_LOCKING */
-#ifdef CONFIG_PREEMPT_TRACER
+#if defined(CONFIG_PREEMPT_TRACER) || defined(CONFIG_CRITICAL_SECTION_EVENTS)
void trace_preempt_on(unsigned long a0, unsigned long a1)
{
+ if (__trace_critical_stop_enabled())
+ stop_critical_event(a0, a1);
+
if (preempt_trace() && !irq_trace())
stop_critical_timing(a0, a1);
}
void trace_preempt_off(unsigned long a0, unsigned long a1)
{
+ if (__trace_critical_start_enabled())
+ start_critical_event(a0, a1);
+
if (preempt_trace() && !irq_trace())
start_critical_timing(a0, a1);
}
-#endif /* CONFIG_PREEMPT_TRACER */
+#endif /* CONFIG_PREEMPT_TRACER || CONFIG_CRITICAL_SECTION_EVENTS */
/* start and stop critical timings used to for stoppage (in idle) */
void start_critical_timings(void)
{
+ if (__trace_critical_start_enabled())
+ start_critical_event(CALLER_ADDR0, CALLER_ADDR1);
+
if (preempt_trace() || irq_trace())
start_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
@@ -787,6 +868,14 @@ EXPORT_SYMBOL_GPL(start_critical_timings);
void stop_critical_timings(void)
{
+ /*
+ * We have to use *_start_enabled() instead of *_stop_enabled()
+ * because the condition for stopping critical timings is
+ * whether a critical section was entered before entering idle.
+ */
+ if (__trace_critical_start_enabled())
+ stop_critical_event(CALLER_ADDR0, CALLER_ADDR1);
+
if (preempt_trace() || irq_trace())
stop_critical_timing(CALLER_ADDR0, CALLER_ADDR1);
}
--
2.14.1.581.gf28d330327-goog