[patch 7/7] tick: Get rid of tick_period

From: Thomas Gleixner
Date: Tue Nov 17 2020 - 09:24:36 EST


The variable tick_period is initialized to NSEC_PER_SEC / HZ during boot
and never updated again.

If NSEC_PER_SEC is not an integer multiple of HZ, this computation is less
accurate than TICK_NSEC, which has proper rounding in place.
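
Illustration only, not part of the patch: a minimal user space sketch of the
difference. The TICK_NSEC line mirrors the rounded definition in
include/linux/jiffies.h, and HZ=1024 is merely an example configuration where
the truncated and rounded results differ by one nanosecond:

#include <stdio.h>

#define NSEC_PER_SEC   1000000000ULL
#define HZ             1024ULL                         /* example value only */
#define TICK_NSEC      ((NSEC_PER_SEC + HZ / 2) / HZ)  /* rounded */

int main(void)
{
        /* truncating division, as the old tick_period initialization did */
        printf("NSEC_PER_SEC / HZ = %llu\n", NSEC_PER_SEC / HZ);   /* 976562 */
        /* rounded constant, as used after this patch */
        printf("TICK_NSEC         = %llu\n", TICK_NSEC);           /* 976563 */
        return 0;
}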

Aside from the inaccuracy there is no reason for having this variable at
all. It's just a pointless indirection, and all usage sites can use the
TICK_NSEC constant directly.
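
Side note on the mechanical conversion below, illustration only: ktime_t is a
signed 64-bit nanosecond count, so adding the scalar TICK_NSEC constant via
ktime_add_ns() yields the same result the old ktime_add(..., tick_period) did.
A rough user space model of the two helpers, using simplified stand-ins rather
than the real ktime.h definitions and arbitrary example values:

#include <stdio.h>
#include <stdint.h>

typedef int64_t ktime_t;                  /* nanoseconds, as in the kernel */

/* simplified stand-ins for the real helpers */
static ktime_t ktime_add(ktime_t lhs, ktime_t rhs)   { return lhs + rhs; }
static ktime_t ktime_add_ns(ktime_t kt, uint64_t ns) { return kt + ns; }

int main(void)
{
        ktime_t next = 5000000;           /* arbitrary absolute expiry, ns */
        ktime_t tick_period = 1000000;    /* what the old variable held at HZ=1000 */

        /* old: add a ktime_t variable; new: add the scalar constant directly */
        printf("%lld\n", (long long)ktime_add(next, tick_period));  /* 6000000 */
        printf("%lld\n", (long long)ktime_add_ns(next, 1000000));   /* 6000000 */
        return 0;
}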

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
kernel/time/tick-broadcast.c |  2 +-
kernel/time/tick-common.c    |  8 +++-----
kernel/time/tick-internal.h  |  1 -
kernel/time/tick-sched.c     | 22 +++++++++++-----------
4 files changed, 15 insertions(+), 18 deletions(-)

--- a/kernel/time/tick-broadcast.c
+++ b/kernel/time/tick-broadcast.c
@@ -331,7 +331,7 @@ static void tick_handle_periodic_broadca
bc_local = tick_do_periodic_broadcast();

if (clockevent_state_oneshot(dev)) {
- ktime_t next = ktime_add(dev->next_event, tick_period);
+ ktime_t next = ktime_add_ns(dev->next_event, TICK_NSEC);

clockevents_program_event(dev, next, true);
}
--- a/kernel/time/tick-common.c
+++ b/kernel/time/tick-common.c
@@ -32,7 +32,6 @@ DEFINE_PER_CPU(struct tick_device, tick_
* no requirement to write hold the jiffies seqcount for it.
*/
ktime_t tick_next_period;
-ktime_t tick_period;

/*
* tick_do_timer_cpu is a timer core internal variable which holds the CPU NR
@@ -90,7 +89,7 @@ static void tick_periodic(int cpu)
write_seqcount_begin(&jiffies_seq);

/* Keep track of the next tick event */
- tick_next_period = ktime_add(tick_next_period, tick_period);
+ tick_next_period = ktime_add_ns(tick_next_period, TICK_NSEC);

do_timer(1);
write_seqcount_end(&jiffies_seq);
@@ -129,7 +128,7 @@ void tick_handle_periodic(struct clock_e
* Setup the next period for devices, which do not have
* periodic mode:
*/
- next = ktime_add(next, tick_period);
+ next = ktime_add_ns(next, TICK_NSEC);

if (!clockevents_program_event(dev, next, false))
return;
@@ -175,7 +174,7 @@ void tick_setup_periodic(struct clock_ev
for (;;) {
if (!clockevents_program_event(dev, next, false))
return;
- next = ktime_add(next, tick_period);
+ next = ktime_add_ns(next, TICK_NSEC);
}
}
}
@@ -222,7 +221,6 @@ static void tick_setup_device(struct tic
tick_do_timer_cpu = cpu;

tick_next_period = ktime_get();
- tick_period = NSEC_PER_SEC / HZ;
#ifdef CONFIG_NO_HZ_FULL
/*
* The boot CPU may be nohz_full, in which case set
--- a/kernel/time/tick-internal.h
+++ b/kernel/time/tick-internal.h
@@ -15,7 +15,6 @@

DECLARE_PER_CPU(struct tick_device, tick_cpu_device);
extern ktime_t tick_next_period;
-extern ktime_t tick_period;
extern int tick_do_timer_cpu __read_mostly;

extern void tick_setup_periodic(struct clock_event_device *dev, int broadcast);
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -95,17 +95,17 @@ static void tick_do_update_jiffies64(kti
write_seqcount_begin(&jiffies_seq);

delta = ktime_sub(now, tick_next_period);
- if (unlikely(delta >= tick_period)) {
+ if (unlikely(delta >= TICK_NSEC)) {
/* Slow path for long idle sleep times */
- s64 incr = ktime_to_ns(tick_period);
+ s64 incr = TICK_NSEC;

ticks += ktime_divns(delta, incr);

last_jiffies_update = ktime_add_ns(last_jiffies_update,
incr * ticks);
} else {
- last_jiffies_update = ktime_add(last_jiffies_update,
- tick_period);
+ last_jiffies_update = ktime_add_ns(last_jiffies_update,
+ TICK_NSEC);
}

/* Advance jiffies to complete the jiffies_seq protected job */
@@ -116,7 +116,7 @@ static void tick_do_update_jiffies64(kti
* pairs with the READ_ONCE() in the lockless quick check above.
*/
WRITE_ONCE(tick_next_period,
- ktime_add(last_jiffies_update, tick_period));
+ ktime_add_ns(last_jiffies_update, TICK_NSEC));

/*
* Release the sequence count. calc_global_load() below is not
@@ -691,7 +691,7 @@ static void tick_nohz_restart(struct tic
hrtimer_set_expires(&ts->sched_timer, ts->last_tick);

/* Forward the time to expire in the future */
- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);

if (ts->nohz_mode == NOHZ_MODE_HIGHRES) {
hrtimer_start_expires(&ts->sched_timer,
@@ -1260,7 +1260,7 @@ static void tick_nohz_handler(struct clo
if (unlikely(ts->tick_stopped))
return;

- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
}

@@ -1297,7 +1297,7 @@ static void tick_nohz_switch_to_nohz(voi
next = tick_init_jiffy_update();

hrtimer_set_expires(&ts->sched_timer, next);
- hrtimer_forward_now(&ts->sched_timer, tick_period);
+ hrtimer_forward_now(&ts->sched_timer, TICK_NSEC);
tick_program_event(hrtimer_get_expires(&ts->sched_timer), 1);
tick_nohz_activate(ts, NOHZ_MODE_LOWRES);
}
@@ -1363,7 +1363,7 @@ static enum hrtimer_restart tick_sched_t
if (unlikely(ts->tick_stopped))
return HRTIMER_NORESTART;

- hrtimer_forward(timer, now, tick_period);
+ hrtimer_forward(timer, now, TICK_NSEC);

return HRTIMER_RESTART;
}
@@ -1397,13 +1397,13 @@ void tick_setup_sched_timer(void)

/* Offset the tick to avert jiffies_lock contention. */
if (sched_skew_tick) {
- u64 offset = ktime_to_ns(tick_period) >> 1;
+ u64 offset = TICK_NSEC >> 1;
do_div(offset, num_possible_cpus());
offset *= smp_processor_id();
hrtimer_add_expires_ns(&ts->sched_timer, offset);
}

- hrtimer_forward(&ts->sched_timer, now, tick_period);
+ hrtimer_forward(&ts->sched_timer, now, TICK_NSEC);
hrtimer_start_expires(&ts->sched_timer, HRTIMER_MODE_ABS_PINNED_HARD);
tick_nohz_activate(ts, NOHZ_MODE_HIGHRES);
}