[PATCH v2 14/17] sched/core: Compute steal values at regular intervals

From: Shrikanth Hegde

Date: Tue Apr 07 2026 - 15:26:39 EST


Kick off the work to compute the steal time at regular interval.
Gated with sched feature STEAL_MONITOR to avoid any overhead in systems
that are not interested in it.

The sampling period can be configured at runtime using steal_mon_period.
By default it is 1000 milliseconds, i.e. 1 second.

This work is done by the first online housekeeping CPU only. Hence it won't
need any complicated synchronization.

Signed-off-by: Shrikanth Hegde <sshegde@xxxxxxxxxxxxx>
---
kernel/sched/core.c | 27 +++++++++++++++++++++++++++
kernel/sched/sched.h | 2 ++
2 files changed, 29 insertions(+)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 8c80600ddd28..1c6fcf1ae4fe 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5694,6 +5694,10 @@ void sched_tick(void)
rq->idle_balance = idle_cpu(cpu);
sched_balance_trigger(rq);
}
+
+ /* This feature currently works only on SMT systems */
+ if (sched_feat(STEAL_MONITOR) && IS_ENABLED(CONFIG_SCHED_SMT))
+ sched_trigger_steal_computation(cpu);
}

#ifdef CONFIG_NO_HZ_FULL
@@ -11355,4 +11359,27 @@ void sched_steal_detection_work(struct work_struct *work)
now = ktime_get();
sm->prev_time = now;
}
+
+void sched_trigger_steal_computation(int cpu)
+{
+ int first_hk_cpu = cpumask_first_and(housekeeping_cpumask(HK_TYPE_KERNEL_NOISE),
+ cpu_online_mask);
+ ktime_t now;
+
+ /* Done by first online housekeeping CPU only */
+ if (likely(cpu != first_hk_cpu))
+ return;
+
+ /*
+ * Since everything is updated by the first housekeeping CPU,
+ * there is no need for complex synchronization.
+ */
+ now = ktime_get();
+
+ /* Default is once per second */
+ if (likely((now - steal_mon.prev_time) < steal_mon.sampling_period_ms * NSEC_PER_MSEC))
+ return;
+
+ schedule_work_on(first_hk_cpu, &steal_mon.work);
+}
#endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index c0fbfb04eda3..337357e48a83 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -4157,6 +4157,7 @@ static inline bool task_can_run_on_preferred_cpu(struct task_struct *p)
void sched_push_current_non_preferred_cpu(struct rq *rq);
void sched_init_steal_monitor(void);
void sched_steal_detection_work(struct work_struct *work);
+void sched_trigger_steal_computation(int cpu);
#else
static inline bool task_can_run_on_preferred_cpu(struct task_struct *p)
{
@@ -4165,6 +4166,7 @@ static inline bool task_can_run_on_preferred_cpu(struct task_struct *p)

static inline void sched_push_current_non_preferred_cpu(struct rq *rq) { }
static inline void sched_init_steal_monitor(void) { }
+static inline void sched_trigger_steal_computation(int cpu) { }
#endif

#endif /* _KERNEL_SCHED_SCHED_H */
--
2.47.3