[tip: sched/hrtick] hrtimer: Rename hrtimer_cpu_base::in_hrtirq to deferred_rearm

From: tip-bot2 for Thomas Gleixner

Date: Sat Feb 28 2026 - 10:46:00 EST


The following commit has been merged into the sched/hrtick branch of tip:

Commit-ID: 9e07a9c980eaa93fd1bba722d31eeb4bf0cbbfb4
Gitweb: https://git.kernel.org/tip/9e07a9c980eaa93fd1bba722d31eeb4bf0cbbfb4
Author: Thomas Gleixner <tglx@xxxxxxxxxx>
AuthorDate: Tue, 24 Feb 2026 17:37:53 +01:00
Committer: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Fri, 27 Feb 2026 16:40:12 +01:00

hrtimer: Rename hrtimer_cpu_base::in_hrtirq to deferred_rearm

The upcoming deferred rearming scheme has the same effect as the existing
deferral of rearming while the hrtimer interrupt is executing. So it can
reuse the in_hrtirq flag, but once rearming can be deferred beyond the
hrtimer interrupt path, the name no longer makes sense.

Rename it to deferred_rearm upfront to keep the actual functional change
separate from the mechanical rename churn.

Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Link: https://patch.msgid.link/20260224163430.935623347@xxxxxxxxxx
---
include/linux/hrtimer_defs.h | 4 ++--
kernel/time/hrtimer.c | 28 +++++++++-------------------
2 files changed, 11 insertions(+), 21 deletions(-)

diff --git a/include/linux/hrtimer_defs.h b/include/linux/hrtimer_defs.h
index f9fbf9a..2c3bdbd 100644
--- a/include/linux/hrtimer_defs.h
+++ b/include/linux/hrtimer_defs.h
@@ -53,7 +53,7 @@ enum hrtimer_base_type {
* @active_bases: Bitfield to mark bases with active timers
* @clock_was_set_seq: Sequence counter of clock was set events
* @hres_active: State of high resolution mode
- * @in_hrtirq: hrtimer_interrupt() is currently executing
+ * @deferred_rearm: A deferred rearm is pending
* @hang_detected: The last hrtimer interrupt detected a hang
* @softirq_activated: displays, if the softirq is raised - update of softirq
* related settings is not required then.
@@ -84,7 +84,7 @@ struct hrtimer_cpu_base {
unsigned int active_bases;
unsigned int clock_was_set_seq;
bool hres_active;
- bool in_hrtirq;
+ bool deferred_rearm;
bool hang_detected;
bool softirq_activated;
bool online;
diff --git a/kernel/time/hrtimer.c b/kernel/time/hrtimer.c
index 2e05a18..6f05d25 100644
--- a/kernel/time/hrtimer.c
+++ b/kernel/time/hrtimer.c
@@ -883,11 +883,8 @@ static void hrtimer_reprogram(struct hrtimer *timer, bool reprogram)
if (expires >= cpu_base->expires_next)
return;

- /*
- * If the hrtimer interrupt is running, then it will reevaluate the
- * clock bases and reprogram the clock event device.
- */
- if (cpu_base->in_hrtirq)
+ /* If a deferred rearm is pending skip reprogramming the device */
+ if (cpu_base->deferred_rearm)
return;

cpu_base->next_timer = timer;
@@ -921,12 +918,8 @@ static bool update_needs_ipi(struct hrtimer_cpu_base *cpu_base, unsigned int act
if (seq == cpu_base->clock_was_set_seq)
return false;

- /*
- * If the remote CPU is currently handling an hrtimer interrupt, it
- * will reevaluate the first expiring timer of all clock bases
- * before reprogramming. Nothing to do here.
- */
- if (cpu_base->in_hrtirq)
+ /* If a deferred rearm is pending the remote CPU will take care of it */
+ if (cpu_base->deferred_rearm)
return false;

/*
@@ -1334,11 +1327,8 @@ static bool __hrtimer_start_range_ns(struct hrtimer *timer, ktime_t tim, u64 del
first = enqueue_hrtimer(timer, base, mode, was_armed);
}

- /*
- * If the hrtimer interrupt is running, then it will reevaluate the
- * clock bases and reprogram the clock event device.
- */
- if (cpu_base->in_hrtirq)
+ /* If a deferred rearm is pending skip reprogramming the device */
+ if (cpu_base->deferred_rearm)
return false;

if (!was_first || cpu_base != this_cpu_base) {
@@ -1947,14 +1937,14 @@ static __latent_entropy void hrtimer_run_softirq(void)

/*
* Very similar to hrtimer_force_reprogram(), except it deals with
- * in_hrtirq and hang_detected.
+ * deferred_rearm and hang_detected.
*/
static void hrtimer_rearm(struct hrtimer_cpu_base *cpu_base, ktime_t now)
{
ktime_t expires_next = hrtimer_update_next_event(cpu_base);

cpu_base->expires_next = expires_next;
- cpu_base->in_hrtirq = false;
+ cpu_base->deferred_rearm = false;

if (unlikely(cpu_base->hang_detected)) {
/*
@@ -1985,7 +1975,7 @@ void hrtimer_interrupt(struct clock_event_device *dev)
raw_spin_lock_irqsave(&cpu_base->lock, flags);
entry_time = now = hrtimer_update_base(cpu_base);
retry:
- cpu_base->in_hrtirq = true;
+ cpu_base->deferred_rearm = true;
/*
* Set expires_next to KTIME_MAX, which prevents that remote CPUs queue
* timers while __hrtimer_run_queues() is expiring the clock bases.