[patch V2 19/20] timer: Split out index calculation

From: Thomas Gleixner
Date: Fri Jun 17 2016 - 09:29:45 EST


From: Anna-Maria Gleixner <anna-maria@xxxxxxxxxxxxx>

For further optimizations we need to separate index calculation from
queueing. No functional change.
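
To make the shape of the split concrete, here is a minimal user-space
sketch of the resulting call structure: the bucket index becomes a pure
function of (expires, clk), and the enqueue step only consumes a
precomputed index. The wheel geometry, struct layout and names below
(sketch_*, LVL_SHIFT, etc.) are simplified stand-ins for illustration,
not the kernel's actual LVL_ macros or hlist handling.

#include <stdio.h>

#define LVL_BITS	6
#define LVL_SIZE	(1UL << LVL_BITS)	/* buckets per level */
#define LVL_MASK	(LVL_SIZE - 1)
#define LVL_SHIFT	3			/* granularity step per level */
#define LVL_DEPTH	8
#define WHEEL_SIZE	(LVL_SIZE * LVL_DEPTH)

struct sketch_base {
	unsigned long clk;			/* stands in for base->clk */
	int pending[WHEEL_SIZE];		/* stands in for pending_map */
	unsigned long vectors[WHEEL_SIZE];	/* stands in for the hlist buckets */
};

/* Index calculation is a pure function of (expires, clk): no base access. */
static unsigned int sketch_calc_wheel_index(unsigned long expires, unsigned long clk)
{
	unsigned long delta = expires - clk;
	unsigned int lvl;

	/* Pick the level whose (simplified) range covers the delta. */
	for (lvl = 0; lvl < LVL_DEPTH - 1; lvl++) {
		if (delta < (LVL_SIZE << (lvl * LVL_SHIFT)))
			break;
	}
	return lvl * LVL_SIZE + ((expires >> (lvl * LVL_SHIFT)) & LVL_MASK);
}

/* Queueing only consumes the precomputed index. */
static void sketch_enqueue_timer(struct sketch_base *base, unsigned long expires,
				 unsigned int idx)
{
	base->vectors[idx] = expires;	/* the real wheel chains timers per bucket */
	base->pending[idx] = 1;
}

/* __internal_add_timer() becomes a trivial composition of the two steps. */
static void sketch_add_timer(struct sketch_base *base, unsigned long expires)
{
	unsigned int idx = sketch_calc_wheel_index(expires, base->clk);

	sketch_enqueue_timer(base, expires, idx);
}

int main(void)
{
	struct sketch_base base = { .clk = 1000 };
	unsigned int i;

	sketch_add_timer(&base, 1005);	/* small delta -> level 0 bucket */
	sketch_add_timer(&base, 9000);	/* larger delta -> higher level  */

	for (i = 0; i < WHEEL_SIZE; i++)
		if (base.pending[i])
			printf("bucket %u: expires %lu\n", i, base.vectors[i]);
	return 0;
}

Keeping the index calculation free of any timer_base access is what
allows callers to compute the bucket index separately from the actual
enqueue, which the follow-up optimizations rely on.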

Signed-off-by: Anna-Maria Gleixner <anna-maria@xxxxxxxxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Frederic Weisbecker <fweisbec@xxxxxxxxx>
Cc: Chris Mason <clm@xxxxxx>
Cc: Eric Dumazet <edumazet@xxxxxxxxxx>
Cc: rt@xxxxxxxxxxxxx
Cc: "Paul E. McKenney" <paulmck@xxxxxxxxxxxxxxxxxx>
Cc: Arjan van de Ven <arjan@xxxxxxxxxxxxx>

---
kernel/time/timer.c | 45 +++++++++++++++++++++++++++++++--------------
1 file changed, 31 insertions(+), 14 deletions(-)

--- a/kernel/time/timer.c
+++ b/kernel/time/timer.c
@@ -467,12 +467,9 @@ static inline unsigned calc_index(unsign
return LVL_OFFS(lvl) + (expires & LVL_MASK);
}

-static void
-__internal_add_timer(struct timer_base *base, struct timer_list *timer)
+static int calc_wheel_index(unsigned long expires, unsigned long clk)
{
- unsigned long expires = timer->expires;
- unsigned long delta = expires - base->clk;
- struct hlist_head *vec;
+ unsigned long delta = expires - clk;
unsigned int idx;

if (delta < LVL_START(1)) {
@@ -490,7 +487,7 @@ static void
} else if (delta < LVL_START(7)) {
idx = calc_index(expires, 6);
} else if ((long) delta < 0) {
- idx = (base->clk >> BASE_CLK_SHIFT) & LVL_MASK;
+ idx = (clk >> BASE_CLK_SHIFT) & LVL_MASK;
} else {
/*
* Force expire obscene large timeouts at the capacity limit
@@ -501,20 +498,33 @@ static void

idx = calc_index(expires, 7);
}
- /*
- * Enqueue the timer into the array bucket, mark it pending in
- * the bitmap and store the index in the timer flags.
- */
- vec = base->vectors + idx;
- hlist_add_head(&timer->entry, vec);
+ return idx;
+}
+
+/*
+ * Enqueue the timer into the hash bucket, mark it pending in
+ * the bitmap and store the index in the timer flags.
+ */
+static void enqueue_timer(struct timer_base *base, struct timer_list *timer,
+ unsigned int idx)
+{
+ hlist_add_head(&timer->entry, base->vectors + idx);
__set_bit(idx, base->pending_map);
timer_set_idx(timer, idx);
}

-static void internal_add_timer(struct timer_base *base, struct timer_list *timer)
+static void
+__internal_add_timer(struct timer_base *base, struct timer_list *timer)
{
- __internal_add_timer(base, timer);
+ unsigned int idx;
+
+ idx = calc_wheel_index(timer->expires, base->clk);
+ enqueue_timer(base, timer, idx);
+}

+static void
+trigger_dyntick_cpu(struct timer_base *base, struct timer_list *timer)
+{
/*
* We might have to IPI the remote CPU if the base is idle and the
* timer is not deferrable. If the other cpu is on the way to idle
@@ -539,6 +549,13 @@ static void internal_add_timer(struct ti
wake_up_nohz_cpu(base->cpu);
}

+static void
+internal_add_timer(struct timer_base *base, struct timer_list *timer)
+{
+ __internal_add_timer(base, timer);
+ trigger_dyntick_cpu(base, timer);
+}
+
#ifdef CONFIG_TIMER_STATS
void __timer_stats_timer_set_start_info(struct timer_list *timer, void *addr)
{