[RFC PATCH 18/30 v3] add get_monotonic_cycles
From: Steven Rostedt
Date: Tue Jan 15 2008 - 15:58:31 EST
The latency tracer needs a way to get an accurate time
without grabbing any locks. Locks themselves might call
the latency tracer and cause, at best, a slowdown.
This patch adds get_monotonic_cycles that returns cycles
from a reliable clock source in a monotonic fashion.
Signed-off-by: Steven Rostedt <srostedt@xxxxxxxxxx>
---
include/linux/clocksource.h | 3 ++
kernel/time/timekeeping.c | 48 ++++++++++++++++++++++++++++++++++++++++++++
2 files changed, 51 insertions(+)
Index: linux-compile.git/include/linux/clocksource.h
===================================================================
--- linux-compile.git.orig/include/linux/clocksource.h 2008-01-14 13:14:14.000000000 -0500
+++ linux-compile.git/include/linux/clocksource.h 2008-01-14 13:14:14.000000000 -0500
@@ -273,6 +273,9 @@ extern int clocksource_register(struct c
extern struct clocksource* clocksource_get_next(void);
extern void clocksource_change_rating(struct clocksource *cs, int rating);
extern void clocksource_resume(void);
+extern cycle_t get_monotonic_cycles(void);
+extern unsigned long cycles_to_usecs(cycle_t cycles);
+extern cycle_t usecs_to_cycles(unsigned long usecs);
/* used to initialize clock */
extern struct clocksource clocksource_jiffies;
Index: linux-compile.git/kernel/time/timekeeping.c
===================================================================
--- linux-compile.git.orig/kernel/time/timekeeping.c 2008-01-14 13:14:14.000000000 -0500
+++ linux-compile.git/kernel/time/timekeeping.c 2008-01-14 13:14:14.000000000 -0500
@@ -103,6 +103,54 @@ static inline void __get_realtime_clock_
timespec_add_ns(ts, nsecs);
}
+cycle_t notrace get_monotonic_cycles(void)
+{
+ cycle_t cycle_now, cycle_delta, cycle_raw, cycle_last;
+
+ do {
+ /*
+ * cycle_raw and cycle_last can change on
+ * another CPU, and we need the delta calculation
+ * of cycle_now and cycle_last to happen atomically, as
+ * well as the addition to cycle_raw. We don't need to
+ * grab any locks; we just keep retrying until we read
+ * a single consistent snapshot of both values.
+ *
+ * In fact, we __can't__ grab any locks. This
+ * function is called from the latency_tracer which can
+ * be called anywhere. To grab any locks (including
+ * seq_locks) we risk putting ourselves into a deadlock.
+ */
+ cycle_raw = clock->cycle_raw;
+ cycle_last = clock->cycle_last;
+
+ /* read clocksource: */
+ cycle_now = clocksource_read(clock);
+
+ /* calculate the delta since the last update_wall_time: */
+ cycle_delta = (cycle_now - cycle_last) & clock->mask;
+
+ } while (cycle_raw != clock->cycle_raw ||
+ cycle_last != clock->cycle_last);
+
+ return cycle_raw + cycle_delta;
+}
+
+unsigned long notrace cycles_to_usecs(cycle_t cycles)
+{
+ u64 ns = cyc2ns(clock, cycles);
+
+ ns += NSEC_PER_USEC/2; /* round to nearest usec in do_div() */
+ do_div(ns, NSEC_PER_USEC);
+
+ return ns;
+}
+
+cycle_t notrace usecs_to_cycles(unsigned long usecs)
+{
+ return ns2cyc(clock, (u64)usecs * NSEC_PER_USEC); /* usecs -> ns -> cycles; matches cycles_to_usecs() */
+}
+
/**
* getnstimeofday - Returns the time of day in a timespec
* @ts: pointer to the timespec to be set
--
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/