[PATCH v2 16/17] sched/core: Mark the direction of steal values to avoid oscillations
From: Shrikanth Hegde
Date: Tue Apr 07 2026 - 15:24:11 EST
Cache the previous decision on steal time, so that only consecutive
high readings (or consecutive low readings) trigger a decrease (or
increase) of the preferred CPUs. This avoids oscillating on a single
outlier sample.
Signed-off-by: Shrikanth Hegde <sshegde@xxxxxxxxxxxxx>
---
kernel/sched/core.c | 12 ++++++++++--
1 file changed, 10 insertions(+), 2 deletions(-)
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 6e2b733adf45..cb9110f95ebf 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -11374,7 +11374,7 @@ void sched_steal_detection_work(struct work_struct *work)
steal_ratio = (delta_steal * 100 * 100) / (delta_ns * num_online_cpus());
/* If the steal time values are high, reduce one core from preferred CPUs */
- if (steal_ratio > sm->high_threshold) {
+ if (sm->previous_decision == 1 && steal_ratio > sm->high_threshold) {
int last_cpu;
cpumask_and(sm->tmp_mask, cpu_online_mask, cpu_preferred_mask);
@@ -11396,7 +11396,7 @@ void sched_steal_detection_work(struct work_struct *work)
}
/* If the steal time values are low, increase one core as preferred CPUs */
- if (steal_ratio < sm->low_threshold) {
+ if (sm->previous_decision == -1 && steal_ratio < sm->low_threshold) {
int first_cpu;
first_cpu = cpumask_first_andnot(cpu_online_mask, cpu_preferred_mask);
@@ -11407,6 +11407,14 @@ void sched_steal_detection_work(struct work_struct *work)
for_each_cpu(tmp_cpu, cpu_smt_mask(first_cpu))
set_cpu_preferred(tmp_cpu, true);
}
+
+ /* Mark the direction; this helps avoid ping-ponging */
+ if (steal_ratio > sm->high_threshold)
+ sm->previous_decision = 1;
+ else if (steal_ratio < sm->low_threshold)
+ sm->previous_decision = -1;
+ else
+ sm->previous_decision = 0;
#endif
}
--
2.47.3