[tip: sched/hrtick] hrtimer: Keep track of first expiring timer per clock base

From: tip-bot2 for Thomas Gleixner

Date: Sat Feb 28 2026 - 10:40:18 EST


The following commit has been merged into the sched/hrtick branch of tip:

Commit-ID: eddffab8282e388dddf032f3295fcec87eb08095
Gitweb: https://git.kernel.org/tip/eddffab8282e388dddf032f3295fcec87eb08095
Author: Thomas Gleixner <tglx@xxxxxxxxxx>
AuthorDate: Tue, 24 Feb 2026 17:38:28 +01:00
Committer: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Fri, 27 Feb 2026 16:40:14 +01:00

hrtimer: Keep track of first expiring timer per clock base

Evaluating the next expiry time of all clock bases is cache line expensive
as the expiry time of the first expiring timer is not cached in the base
and requires accessing the timer itself, which is definitely in a different
cache line.

It's way more efficient to keep track of the expiry time on enqueue and
dequeue operations as the relevant data is already in the cache at that
point.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://patch.msgid.link/20260224163431.404839710@xxxxxxxxxx
---
include/linux/hrtimer_defs.h | 2 ++
kernel/time/hrtimer.c | 37 ++++++++++++++++++++++++++++++++---
2 files changed, 36 insertions(+), 3 deletions(-)

diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index b6846ef..fb38df4 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -19,6 +19,7 @@
* timer to a base on another cpu.
* @clockid: clock id for per_cpu support
* @seq: seqcount around __run_hrtimer
+ * @expires_next: Absolute time of the next event in this clock base
* @running: pointer to the currently running hrtimer
* @active: red black tree root node for the active timers
* @offset: offset of this clock to the monotonic base
@@ -28,6 +29,7 @@ struct hrtimer_clock_base {
unsigned int index;
clockid_t clockid;
seqcount_raw_spinlock_t seq;
+ ktime_t expires_next;
struct hrtimer *running;
struct timerqueue_head active;
ktime_t offset;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index e9592cb..d70899a 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -1107,7 +1107,18 @@ static bool enqueue_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *ba
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->is_queued, HRTIMER_STATE_ENQUEUED);

- return timerqueue_add(&base->active, &timer->node);
+ if (!timerqueue_add(&base->active, &timer->node))
+ return false;
+
+ base->expires_next = hrtimer_get_expires(timer);
+ return true;
+}
+
+static inline void base_update_next_timer(struct hrtimer_clock_base *base)
+{
+ struct timerqueue_node *next = timerqueue_getnext(&base->active);
+
+ base->expires_next = next ? next->expires : KTIME_MAX;
}

/*
@@ -1122,6 +1133,7 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *b
bool newstate, bool reprogram)
{
struct hrtimer_cpu_base *cpu_base = base->cpu_base;
+ bool was_first;

lockdep_assert_held(&cpu_base->lock);

@@ -1131,9 +1143,17 @@ static void __remove_hrtimer(struct hrtimer *timer, struct hrtimer_clock_base *b
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->is_queued, newstate);

+ was_first = &timer->node == timerqueue_getnext(&base->active);
+
if (!timerqueue_del(&base->active, &timer->node))
cpu_base->active_bases &= ~(1 << base->index);

+ /* Nothing to update if this was not the first timer in the base */
+ if (!was_first)
+ return;
+
+ base_update_next_timer(base);
+
/*
* If reprogram is false don't update cpu_base->next_timer and do not
* touch the clock event device.
@@ -1182,9 +1202,12 @@ static inline bool
remove_and_enqueue_same_base(struct hrtimer *timer, struct hrtimer_clock_base *base,
const enum hrtimer_mode mode, ktime_t expires, u64 delta_ns)
{
+ bool was_first = false;
+
/* Remove it from the timer queue if active */
if (timer->is_queued) {
debug_hrtimer_deactivate(timer);
+ was_first = &timer->node == timerqueue_getnext(&base->active);
timerqueue_del(&base->active, &timer->node);
}

@@ -1197,8 +1220,16 @@ remove_and_enqueue_same_base(struct hrtimer *timer, struct hrtimer_clock_base *b
/* Pairs with the lockless read in hrtimer_is_queued() */
WRITE_ONCE(timer->is_queued, HRTIMER_STATE_ENQUEUED);

- /* Returns true if this is the first expiring timer */
- return timerqueue_add(&base->active, &timer->node);
+ /* If it's the first expiring timer now or again, update base */
+ if (timerqueue_add(&base->active, &timer->node)) {
+ base->expires_next = expires;
+ return true;
+ }
+
+ if (was_first)
+ base_update_next_timer(base);
+
+ return false;
}

static inline ktime_t hrtimer_update_lowres(struct hrtimer *timer, ktime_t tim,