[PATCH v6 4/4] tracing: Histogram for delayed hrtimer offsets

From: Binoy Jayan
Date: Wed Sep 07 2016 - 07:14:58 EST


Generate a histogram of the latencies of delayed timer offsets in
nanoseconds. It shows the latency captured due to a delayed timer
expiry event, which happens for example when a timer misses its
deadline because interrupts were disabled. A process, if scheduled
as a result of the timer expiration, suffers this latency.

The following filter(s) may be used

'hist:key=common_pid.execname:val=toffset,hitcount'
'hist:key=cpu,tcomm:val=toffset:sort=tcomm'
'hist:key=comm,tcomm:sort=comm,tcomm'

Signed-off-by: Binoy Jayan <binoy.jayan@xxxxxxxxxx>
---
include/linux/hrtimer.h | 3 +++
include/trace/events/timer.h | 25 +++++++++++++++++++++
kernel/time/Kconfig | 7 ++++++
kernel/time/hrtimer.c | 52 ++++++++++++++++++++++++++++++++++++++++++++
4 files changed, 87 insertions(+)

diff --git a/include/linux/hrtimer.h b/include/linux/hrtimer.h
index 5e00f80..9146842 100644
--- a/include/linux/hrtimer.h
+++ b/include/linux/hrtimer.h
@@ -104,6 +104,9 @@ struct hrtimer {
struct hrtimer_clock_base *base;
u8 state;
u8 is_rel;
+#ifdef CONFIG_DELAYED_TIMER_OFFSETS_HIST
+ ktime_t praecox;
+#endif
#ifdef CONFIG_TIMER_STATS
int start_pid;
void *start_site;
diff --git a/include/trace/events/timer.h b/include/trace/events/timer.h
index 28c5da6..ee45aed 100644
--- a/include/trace/events/timer.h
+++ b/include/trace/events/timer.h
@@ -382,6 +382,31 @@ TRACE_EVENT(tick_stop,
);
#endif

+#ifdef CONFIG_DELAYED_TIMER_OFFSETS_HIST
+TRACE_EVENT(latency_hrtimer_interrupt,
+
+ TP_PROTO(long long toffset, struct task_struct *task),
+
+ TP_ARGS(toffset, task),
+
+ TP_STRUCT__entry(
+ __field(long long, toffset)
+ __array(char, tcomm, TASK_COMM_LEN)
+ __field(int, tprio)
+ ),
+
+ TP_fast_assign(
+ __entry->toffset = toffset;
+ memcpy(__entry->tcomm, task != NULL ? task->comm : "<none>",
+ task != NULL ? TASK_COMM_LEN : 7);
+ __entry->tprio = task != NULL ? task->prio : -1;
+ ),
+
+ TP_printk("toffset=%lld thread=%s[%d]",
+ __entry->toffset, __entry->tcomm, __entry->tprio)
+);
+#endif
+
#endif /* _TRACE_TIMER_H */

/* This part must be outside protection */
diff --git a/kernel/time/Kconfig b/kernel/time/Kconfig
index 4008d9f..8ff19dd 100644
--- a/kernel/time/Kconfig
+++ b/kernel/time/Kconfig
@@ -193,5 +193,12 @@ config HIGH_RES_TIMERS
hardware is not capable then this option only increases
the size of the kernel image.

+config DELAYED_TIMER_OFFSETS_HIST
+ depends on HIGH_RES_TIMERS
+ select GENERIC_TRACER
+ bool "Delayed Timer Offsets Histogram"
+ help
+ Generate a histogram of delayed timer offsets in nanoseconds.
+
endmenu
endif
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 9ba7c82..432d49a 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -56,6 +56,8 @@

#include "tick-internal.h"

+static enum hrtimer_restart hrtimer_wakeup(struct hrtimer *timer);
+
/*
* The timer bases:
*
@@ -960,6 +962,58 @@ static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,
 	return tim;
 }
 
+#ifdef CONFIG_DELAYED_TIMER_OFFSETS_HIST
+/*
+ * Remember at enqueue time whether the timer is already past its
+ * expiry ('praecox'): if so, latency is later measured from the
+ * enqueue-time "now" instead of the already-stale expiry time.
+ */
+static inline void latency_hrtimer_timing_start(struct hrtimer *timer,
+					struct hrtimer_clock_base *new_base,
+					ktime_t tim)
+{
+	if (unlikely(trace_latency_hrtimer_interrupt_enabled())) {
+		ktime_t now = new_base->get_time();
+
+		if (ktime_to_ns(tim) < ktime_to_ns(now))
+			timer->praecox = now;
+		else
+			timer->praecox = ktime_set(0, 0);
+	}
+}
+
+/* Emit the latency_hrtimer_interrupt event when a timer expires late. */
+static inline void latency_hrtimer_timing_stop(struct hrtimer *timer,
+					ktime_t basenow)
+{
+	s64 latency;
+	struct task_struct *task;
+
+	if (likely(!trace_latency_hrtimer_interrupt_enabled()))
+		return;
+
+	/* ktime_to_ns() returns s64; 'long' would truncate on 32-bit. */
+	latency = ktime_to_ns(ktime_sub(basenow,
+			ktime_to_ns(timer->praecox) ?
+			timer->praecox : hrtimer_get_expires(timer)));
+	task = timer->function == hrtimer_wakeup ?
+			container_of(timer, struct hrtimer_sleeper,
+				timer)->task : NULL;
+	if (latency > 0)
+		trace_latency_hrtimer_interrupt((long long) latency, task);
+}
+#else
+static inline void latency_hrtimer_timing_start(struct hrtimer *timer,
+					struct hrtimer_clock_base *new_base,
+					ktime_t tim)
+{
+}
+static inline void latency_hrtimer_timing_stop(struct hrtimer *timer,
+					ktime_t basenow)
+{
+}
+
+#endif
+
/**
* hrtimer_start_range_ns - (re)start an hrtimer on the current CPU
* @timer: the timer to be added
@@ -992,6 +1040,8 @@ void hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim,

timer_stats_hrtimer_set_start_info(timer);

+ latency_hrtimer_timing_start(timer, new_base, tim);
+
leftmost = enqueue_hrtimer(timer, new_base);
if (!leftmost)
goto unlock;
@@ -1284,6 +1334,8 @@ static void __hrtimer_run_queues(struct hrtimer_cpu_base *cpu_base, ktime_t now)

timer = container_of(node, struct hrtimer, node);

+ latency_hrtimer_timing_stop(timer, basenow);
+
/*
* The immediate goal for using the softexpires is
* minimizing wakeups, not running timers at the
--
The Qualcomm Innovation Center, Inc. is a member of the Code Aurora Forum,
a Linux Foundation Collaborative Project