[PATCH v2] sched/idle: disable tick in idle=poll idle entry

From: Marcelo Tosatti

Date: Wed Oct 29 2025 - 14:06:33 EST



Commit a5183862e76fdc25f36b39c2489b816a5c66e2e5
("tick/nohz: Conditionally restart tick on idle exit") allows
a nohz_full CPU to enter idle and return from it with the
scheduler tick disabled (since the tick might be undesired noise).

The idle=poll case still unconditionally restarts the tick when entering
idle.

To reduce the noise for the idle=poll case as well, stop the tick when
entering idle.

Change tick_nohz_full_kick_cpu() to set the NEED_RESCHED bit, to handle
the case where a new timer is added from an interrupt. This breaks out of
cpu_idle_poll() and rearms the timer if necessary.

---

v2: Handle the case where a new timer is added from an interrupt (Frederic Weisbecker)

include/linux/sched.h | 2 ++
kernel/sched/core.c | 10 ++++++++++
kernel/sched/idle.c | 2 +-
kernel/time/tick-sched.c | 1 +
4 files changed, 14 insertions(+), 1 deletion(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index cbb7340c5866..1f6938dc20cd 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -2428,4 +2428,6 @@ extern void migrate_enable(void);

DEFINE_LOCK_GUARD_0(migrate, migrate_disable(), migrate_enable())

+void set_tif_resched_if_polling(int cpu);
+
#endif
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f1ebf67b48e2..f0b84600084b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -988,6 +988,11 @@ static bool set_nr_if_polling(struct task_struct *p)
return true;
}

+void set_tif_resched_if_polling(int cpu)
+{
+ set_nr_if_polling(cpu_rq(cpu)->idle);
+}
+
#else
static inline bool set_nr_and_not_polling(struct thread_info *ti, int tif)
{
@@ -999,6 +1004,11 @@ static inline bool set_nr_if_polling(struct task_struct *p)
{
return false;
}
+
+void set_tif_resched_if_polling(int cpu)
+{
+ set_tsk_need_resched(cpu_rq(cpu)->idle);
+}
#endif

static bool __wake_q_add(struct wake_q_head *head, struct task_struct *task)
diff --git a/kernel/sched/idle.c b/kernel/sched/idle.c
index c39b089d4f09..428c2d1cbd1b 100644
--- a/kernel/sched/idle.c
+++ b/kernel/sched/idle.c
@@ -324,7 +324,7 @@ static void do_idle(void)
* idle as we know that the IPI is going to arrive right away.
*/
if (cpu_idle_force_poll || tick_check_broadcast_expired()) {
- tick_nohz_idle_restart_tick();
+ tick_nohz_idle_stop_tick();
cpu_idle_poll();
} else {
cpuidle_idle_call();
diff --git a/kernel/time/tick-sched.c b/kernel/time/tick-sched.c
index c527b421c865..efc3653999dc 100644
--- a/kernel/time/tick-sched.c
+++ b/kernel/time/tick-sched.c
@@ -408,6 +408,7 @@ void tick_nohz_full_kick_cpu(int cpu)
if (!tick_nohz_full_cpu(cpu))
return;

+ set_tif_resched_if_polling(cpu);
irq_work_queue_on(&per_cpu(nohz_full_kick_work, cpu), cpu);
}