[PATCH RT 01/12] Latency histograms: Cope with backwards running local trace clock
From: Steven Rostedt
Date: Tue Jul 17 2012 - 11:34:54 EST
From: Carsten Emde <C.Emde@xxxxxxxxx>
Thanks to the wonders of modern technology, the local trace clock can
now run backwards. Since this never happened before, the time difference
between now and an earlier timestamp was assumed never to become negative
and was therefore stored in an unsigned integer variable. We now need a
signed integer so that a negative difference is accounted as an underflow
in the related histogram. (In cases where this is not a malfunction,
bipolar histograms can be used.)
This patch makes sure that all latency variables are represented as
signed integers and that negative values are counted as histogram
underflows.
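For illustration only, here is a minimal sketch (a hypothetical helper
built from the hist_data fields touched by this patch, not the actual
latency_hist() code): with a signed latency, a backwards-running clock
produces a negative value that is recorded as an underflow instead of
wrapping around to a huge unsigned number.

	/* Hypothetical sketch; offset/bipolar handling omitted for brevity. */
	static void account_latency(struct hist_data *hist, u64 start, u64 stop)
	{
		long latency = ((long)(stop - start)) / NSECS_PER_USECS;

		if (latency < 0) {
			/* local clock ran backwards */
			hist->below_hist_bound_samples++;
			return;
		}
		if (latency >= MAX_ENTRY_NUM) {
			hist->above_hist_bound_samples++;
			return;
		}
		hist->hist_array[latency]++;
		hist->total_samples++;
		hist->accumulate_lat += latency;
	}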
On one of the misbehaving processors, switching to the global clock
solved the problem:
echo global >/sys/kernel/debug/tracing/trace_clock
Signed-off-by: Carsten Emde <C.Emde@xxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Steven Rostedt <rostedt@xxxxxxxxxxx>
---
include/linux/sched.h | 2 +-
kernel/trace/latency_hist.c | 71 ++++++++++++++++++++++---------------------
2 files changed, 38 insertions(+), 35 deletions(-)
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 7fc8321..8461abc 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1629,7 +1629,7 @@ struct task_struct {
#ifdef CONFIG_WAKEUP_LATENCY_HIST
u64 preempt_timestamp_hist;
#ifdef CONFIG_MISSED_TIMER_OFFSETS_HIST
- unsigned long timer_offset;
+ long timer_offset;
#endif
#endif
#endif /* CONFIG_TRACING */
diff --git a/kernel/trace/latency_hist.c b/kernel/trace/latency_hist.c
index 9d49fcb..d514eef 100644
--- a/kernel/trace/latency_hist.c
+++ b/kernel/trace/latency_hist.c
@@ -27,6 +27,8 @@
#include "trace.h"
#include <trace/events/sched.h>
+#define NSECS_PER_USECS 1000L
+
#define CREATE_TRACE_POINTS
#include <trace/events/hist.h>
@@ -46,11 +48,11 @@ enum {
struct hist_data {
atomic_t hist_mode; /* 0 log, 1 don't log */
long offset; /* set it to MAX_ENTRY_NUM/2 for a bipolar scale */
- unsigned long min_lat;
- unsigned long max_lat;
+ long min_lat;
+ long max_lat;
unsigned long long below_hist_bound_samples;
unsigned long long above_hist_bound_samples;
- unsigned long long accumulate_lat;
+ long long accumulate_lat;
unsigned long long total_samples;
unsigned long long hist_array[MAX_ENTRY_NUM];
};
@@ -152,8 +154,8 @@ static struct enable_data timerandwakeup_enabled_data = {
static DEFINE_PER_CPU(struct maxlatproc_data, timerandwakeup_maxlatproc);
#endif
-void notrace latency_hist(int latency_type, int cpu, unsigned long latency,
- unsigned long timeroffset, cycle_t stop,
+void notrace latency_hist(int latency_type, int cpu, long latency,
+ long timeroffset, cycle_t stop,
struct task_struct *p)
{
struct hist_data *my_hist;
@@ -224,7 +226,7 @@ void notrace latency_hist(int latency_type, int cpu, unsigned long latency,
my_hist->hist_array[latency]++;
if (unlikely(latency > my_hist->max_lat ||
- my_hist->min_lat == ULONG_MAX)) {
+ my_hist->min_lat == LONG_MAX)) {
#if defined(CONFIG_WAKEUP_LATENCY_HIST) || \
defined(CONFIG_MISSED_TIMER_OFFSETS_HIST)
if (latency_type == WAKEUP_LATENCY ||
@@ -263,15 +265,14 @@ static void *l_start(struct seq_file *m, loff_t *pos)
atomic_dec(&my_hist->hist_mode);
if (likely(my_hist->total_samples)) {
- unsigned long avg = (unsigned long)
- div64_u64(my_hist->accumulate_lat,
+ long avg = (long) div64_s64(my_hist->accumulate_lat,
my_hist->total_samples);
snprintf(minstr, sizeof(minstr), "%ld",
- (long) my_hist->min_lat - my_hist->offset);
+ my_hist->min_lat - my_hist->offset);
snprintf(avgstr, sizeof(avgstr), "%ld",
- (long) avg - my_hist->offset);
+ avg - my_hist->offset);
snprintf(maxstr, sizeof(maxstr), "%ld",
- (long) my_hist->max_lat - my_hist->offset);
+ my_hist->max_lat - my_hist->offset);
} else {
strcpy(minstr, "<undef>");
strcpy(avgstr, minstr);
@@ -376,10 +377,10 @@ static void hist_reset(struct hist_data *hist)
memset(hist->hist_array, 0, sizeof(hist->hist_array));
hist->below_hist_bound_samples = 0ULL;
hist->above_hist_bound_samples = 0ULL;
- hist->min_lat = ULONG_MAX;
- hist->max_lat = 0UL;
+ hist->min_lat = LONG_MAX;
+ hist->max_lat = LONG_MIN;
hist->total_samples = 0ULL;
- hist->accumulate_lat = 0ULL;
+ hist->accumulate_lat = 0LL;
atomic_inc(&hist->hist_mode);
}
@@ -790,9 +791,9 @@ static notrace void probe_preemptirqsoff_hist(void *v, int reason,
stop = ftrace_now(cpu);
time_set++;
- if (start && stop >= start) {
- unsigned long latency =
- nsecs_to_usecs(stop - start);
+ if (start) {
+ long latency = ((long) (stop - start)) /
+ NSECS_PER_USECS;
latency_hist(IRQSOFF_LATENCY, cpu, latency, 0,
stop, NULL);
@@ -808,9 +809,9 @@ static notrace void probe_preemptirqsoff_hist(void *v, int reason,
if (!(time_set++))
stop = ftrace_now(cpu);
- if (start && stop >= start) {
- unsigned long latency =
- nsecs_to_usecs(stop - start);
+ if (start) {
+ long latency = ((long) (stop - start)) /
+ NSECS_PER_USECS;
latency_hist(PREEMPTOFF_LATENCY, cpu, latency,
0, stop, NULL);
@@ -827,9 +828,10 @@ static notrace void probe_preemptirqsoff_hist(void *v, int reason,
if (!time_set)
stop = ftrace_now(cpu);
- if (start && stop >= start) {
- unsigned long latency =
- nsecs_to_usecs(stop - start);
+ if (start) {
+ long latency = ((long) (stop - start)) /
+ NSECS_PER_USECS;
+
latency_hist(PREEMPTIRQSOFF_LATENCY, cpu,
latency, 0, stop, NULL);
}
@@ -908,7 +910,7 @@ static notrace void probe_wakeup_latency_hist_stop(void *v,
{
unsigned long flags;
int cpu = task_cpu(next);
- unsigned long latency;
+ long latency;
cycle_t stop;
struct task_struct *cpu_wakeup_task;
@@ -939,7 +941,8 @@ static notrace void probe_wakeup_latency_hist_stop(void *v,
*/
stop = ftrace_now(raw_smp_processor_id());
- latency = nsecs_to_usecs(stop - next->preempt_timestamp_hist);
+ latency = ((long) (stop - next->preempt_timestamp_hist)) /
+ NSECS_PER_USECS;
if (per_cpu(wakeup_sharedprio, cpu)) {
latency_hist(WAKEUP_LATENCY_SHAREDPRIO, cpu, latency, 0, stop,
@@ -975,7 +978,7 @@ static notrace void probe_hrtimer_interrupt(void *v, int cpu,
(task->prio < curr->prio ||
(task->prio == curr->prio &&
!cpumask_test_cpu(cpu, &task->cpus_allowed)))) {
- unsigned long latency;
+ long latency;
cycle_t now;
if (missed_timer_offsets_pid) {
@@ -985,7 +988,7 @@ static notrace void probe_hrtimer_interrupt(void *v, int cpu,
}
now = ftrace_now(cpu);
- latency = (unsigned long) div_s64(-latency_ns, 1000);
+ latency = (long) div_s64(-latency_ns, NSECS_PER_USECS);
latency_hist(MISSED_TIMER_OFFSETS, cpu, latency, latency, now,
task);
#ifdef CONFIG_WAKEUP_LATENCY_HIST
@@ -1026,7 +1029,7 @@ static __init int latency_hist_init(void)
&per_cpu(irqsoff_hist, i), &latency_hist_fops);
my_hist = &per_cpu(irqsoff_hist, i);
atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = 0xFFFFFFFFUL;
+ my_hist->min_lat = LONG_MAX;
}
entry = debugfs_create_file("reset", 0644, dentry,
(void *)IRQSOFF_LATENCY, &latency_hist_reset_fops);
@@ -1041,7 +1044,7 @@ static __init int latency_hist_init(void)
&per_cpu(preemptoff_hist, i), &latency_hist_fops);
my_hist = &per_cpu(preemptoff_hist, i);
atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = 0xFFFFFFFFUL;
+ my_hist->min_lat = LONG_MAX;
}
entry = debugfs_create_file("reset", 0644, dentry,
(void *)PREEMPTOFF_LATENCY, &latency_hist_reset_fops);
@@ -1056,7 +1059,7 @@ static __init int latency_hist_init(void)
&per_cpu(preemptirqsoff_hist, i), &latency_hist_fops);
my_hist = &per_cpu(preemptirqsoff_hist, i);
atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = 0xFFFFFFFFUL;
+ my_hist->min_lat = LONG_MAX;
}
entry = debugfs_create_file("reset", 0644, dentry,
(void *)PREEMPTIRQSOFF_LATENCY, &latency_hist_reset_fops);
@@ -1081,14 +1084,14 @@ static __init int latency_hist_init(void)
&latency_hist_fops);
my_hist = &per_cpu(wakeup_latency_hist, i);
atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = 0xFFFFFFFFUL;
+ my_hist->min_lat = LONG_MAX;
entry = debugfs_create_file(name, 0444, dentry_sharedprio,
&per_cpu(wakeup_latency_hist_sharedprio, i),
&latency_hist_fops);
my_hist = &per_cpu(wakeup_latency_hist_sharedprio, i);
atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = 0xFFFFFFFFUL;
+ my_hist->min_lat = LONG_MAX;
sprintf(name, cpufmt_maxlatproc, i);
@@ -1122,7 +1125,7 @@ static __init int latency_hist_init(void)
&per_cpu(missed_timer_offsets, i), &latency_hist_fops);
my_hist = &per_cpu(missed_timer_offsets, i);
atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = 0xFFFFFFFFUL;
+ my_hist->min_lat = LONG_MAX;
sprintf(name, cpufmt_maxlatproc, i);
mp = &per_cpu(missed_timer_offsets_maxlatproc, i);
@@ -1150,7 +1153,7 @@ static __init int latency_hist_init(void)
&latency_hist_fops);
my_hist = &per_cpu(timerandwakeup_latency_hist, i);
atomic_set(&my_hist->hist_mode, 1);
- my_hist->min_lat = 0xFFFFFFFFUL;
+ my_hist->min_lat = LONG_MAX;
sprintf(name, cpufmt_maxlatproc, i);
mp = &per_cpu(timerandwakeup_maxlatproc, i);
--
1.7.10.4