Re: [patch 2/2] sched/idle: Make default_idle_call() NOHZ aware

From: Rafael J. Wysocki

Date: Sat Mar 07 2026 - 11:26:05 EST


On Friday, March 6, 2026 10:31:49 PM CET Rafael J. Wysocki wrote:
> On Fri, Mar 6, 2026 at 10:21 PM Rafael J. Wysocki <rafael@xxxxxxxxxx> wrote:
> > On Wednesday, March 4, 2026 4:03:06 AM CET Qais Yousef wrote:
> > > On 03/02/26 22:25, Rafael J. Wysocki wrote:
> > > > On Mon, Mar 2, 2026 at 12:04 PM Christian Loehle

[cut]

> > > > >
> > > > > Why don't we just require one or two consecutive tick wakeups before stopping?
> > > >
> > > > Exactly my thought and I think one should be sufficient.
> > >
> > > I concur. From our experience with TEO util threshold these averages can
> > > backfire. I think one tick is sufficient delay to not be obviously broken.
> >
> > So if I'm not mistaken, it would be something like the appended prototype
> > (completely untested, but it builds for me).
> >
> > ---
> > drivers/cpuidle/cpuidle.c | 10 ----------
> > kernel/sched/idle.c | 32 ++++++++++++++++++++++++--------
> > 2 files changed, 24 insertions(+), 18 deletions(-)
> >
> > --- a/drivers/cpuidle/cpuidle.c
> > +++ b/drivers/cpuidle/cpuidle.c
> > @@ -359,16 +359,6 @@ noinstr int cpuidle_enter_state(struct c
> > int cpuidle_select(struct cpuidle_driver *drv, struct cpuidle_device *dev,
> > bool *stop_tick)
> > {
> > - /*
> > - * If there is only a single idle state (or none), there is nothing
> > - * meaningful for the governor to choose. Skip the governor and
> > - * always use state 0 with the tick running.
> > - */
> > - if (drv->state_count <= 1) {
> > - *stop_tick = false;
> > - return 0;
> > - }
> > -
> > return cpuidle_curr_governor->select(drv, dev, stop_tick);
> > }
> >
> > --- a/kernel/sched/idle.c
> > +++ b/kernel/sched/idle.c
> > @@ -161,6 +161,14 @@ static int call_cpuidle(struct cpuidle_d
> > return cpuidle_enter(drv, dev, next_state);
> > }
> >
> > +static void idle_call_stop_or_retain_tick(bool stop_tick)
> > +{
> > + if (stop_tick || tick_nohz_tick_stopped())
> > + tick_nohz_idle_stop_tick();
> > + else
> > + tick_nohz_idle_retain_tick();
> > +}
> > +
> > /**
> > * cpuidle_idle_call - the main idle function
> > *
> > @@ -170,7 +178,7 @@ static int call_cpuidle(struct cpuidle_d
> > * set, and it returns with polling set. If it ever stops polling, it
> > * must clear the polling bit.
> > */
> > -static void cpuidle_idle_call(void)
> > +static void cpuidle_idle_call(bool got_tick)
> > {
> > struct cpuidle_device *dev = cpuidle_get_device();
> > struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
> > @@ -186,7 +194,7 @@ static void cpuidle_idle_call(void)
> > }
> >
> > if (cpuidle_not_available(drv, dev)) {
> > - tick_nohz_idle_stop_tick();
> > + idle_call_stop_or_retain_tick(!got_tick);
>
> Oh, I got this backwards (here and below).
>
> The tick should be stopped if we've got the tick previously, but you
> get the idea.

In the meantime I realized that if the .select() governor
callback is skipped, its .reflect() callback should be skipped
as well, so I've posted this:

https://lkml.org/lkml/2026/3/7/569

and here's a fixed version of the last patch on top of the above (for
completeness):

---
kernel/sched/idle.c | 25 ++++++++++++++++---------
1 file changed, 16 insertions(+), 9 deletions(-)

--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -161,6 +161,14 @@ static int call_cpuidle(struct cpuidle_d
return cpuidle_enter(drv, dev, next_state);
}

+static void idle_call_stop_or_retain_tick(bool stop_tick)
+{
+ if (stop_tick || tick_nohz_tick_stopped())
+ tick_nohz_idle_stop_tick();
+ else
+ tick_nohz_idle_retain_tick();
+}
+
/**
* cpuidle_idle_call - the main idle function
*
@@ -170,7 +178,7 @@ static int call_cpuidle(struct cpuidle_d
* set, and it returns with polling set. If it ever stops polling, it
* must clear the polling bit.
*/
-static void cpuidle_idle_call(void)
+static void cpuidle_idle_call(bool stop_tick)
{
struct cpuidle_device *dev = cpuidle_get_device();
struct cpuidle_driver *drv = cpuidle_get_cpu_driver(dev);
@@ -186,7 +194,7 @@ static void cpuidle_idle_call(void)
}

if (cpuidle_not_available(drv, dev)) {
- tick_nohz_idle_stop_tick();
+ idle_call_stop_or_retain_tick(stop_tick);

default_idle_call();
goto exit_idle;
@@ -222,17 +230,14 @@ static void cpuidle_idle_call(void)
next_state = cpuidle_find_deepest_state(drv, dev, max_latency_ns);
call_cpuidle(drv, dev, next_state);
} else if (drv->state_count > 1) {
- bool stop_tick = true;
+ stop_tick = true;

/*
* Ask the cpuidle framework to choose a convenient idle state.
*/
next_state = cpuidle_select(drv, dev, &stop_tick);

- if (stop_tick || tick_nohz_tick_stopped())
- tick_nohz_idle_stop_tick();
- else
- tick_nohz_idle_retain_tick();
+ idle_call_stop_or_retain_tick(stop_tick);

entered_state = call_cpuidle(drv, dev, next_state);
/*
@@ -240,7 +245,7 @@ static void cpuidle_idle_call(void)
*/
cpuidle_reflect(dev, entered_state);
} else {
- tick_nohz_idle_retain_tick();
+ idle_call_stop_or_retain_tick(stop_tick);

/*
* If there is only a single idle state (or none), there is
@@ -268,6 +273,7 @@ exit_idle:
static void do_idle(void)
{
int cpu = smp_processor_id();
+ bool got_tick = false;

/*
* Check if we need to update blocked load
@@ -338,8 +344,9 @@ static void do_idle(void)
tick_nohz_idle_restart_tick();
cpu_idle_poll();
} else {
- cpuidle_idle_call();
+ cpuidle_idle_call(got_tick);
}
+ got_tick = tick_nohz_idle_got_tick();
arch_cpu_idle_exit();
}