[PATCH v2 03/17] cpumask: Introduce cpu_preferred_mask
From: Shrikanth Hegde
Date: Tue Apr 07 2026 - 15:22:01 EST
This patch does
- Declare and Define cpu_preferred_mask.
- Get/Set helpers for it.
Values are set/cleared by the scheduler based on the detected steal time values.
A CPU is set to preferred when it comes online. Later it may be
marked as non-preferred depending on steal time values with
STEAL_MONITOR enabled.
Signed-off-by: Shrikanth Hegde <sshegde@xxxxxxxxxxxxx>
---
include/linux/cpumask.h | 22 ++++++++++++++++++++++
kernel/cpu.c | 6 ++++++
kernel/sched/core.c | 5 +++++
3 files changed, 33 insertions(+)
diff --git a/include/linux/cpumask.h b/include/linux/cpumask.h
index 80211900f373..80c5cc13b8ad 100644
--- a/include/linux/cpumask.h
+++ b/include/linux/cpumask.h
@@ -1296,6 +1296,28 @@ static __always_inline bool cpu_dying(unsigned int cpu)
#endif /* NR_CPUS > 1 */
+/*
+ * All related wrappers kept together to avoid too many ifdefs
+ * See Documentation/scheduler/sched-arch.rst for details
+ */
+#ifdef CONFIG_PARAVIRT
+extern struct cpumask __cpu_preferred_mask;
+#define cpu_preferred_mask ((const struct cpumask *)&__cpu_preferred_mask)
+#define set_cpu_preferred(cpu, preferred) assign_cpu((cpu), &__cpu_preferred_mask, (preferred))
+
+static __always_inline bool cpu_preferred(unsigned int cpu)
+{
+ return cpumask_test_cpu(cpu, cpu_preferred_mask);
+}
+#else
+static __always_inline bool cpu_preferred(unsigned int cpu)
+{
+ return true;
+}
+
+static __always_inline void set_cpu_preferred(unsigned int cpu, bool preferred) { }
+#endif
+
#define cpu_is_offline(cpu) unlikely(!cpu_online(cpu))
#if NR_CPUS <= BITS_PER_LONG
diff --git a/kernel/cpu.c b/kernel/cpu.c
index bc4f7a9ba64e..2d4d037680d4 100644
--- a/kernel/cpu.c
+++ b/kernel/cpu.c
@@ -3137,6 +3137,12 @@ void set_cpu_online(unsigned int cpu, bool online)
if (cpumask_test_and_clear_cpu(cpu, &__cpu_online_mask))
atomic_dec(&__num_online_cpus);
}
+
+ /*
+ * An online CPU is by default assumed to be preferred,
+ * until STEAL_MONITOR changes it.
+ */
+ set_cpu_preferred(cpu, online);
}
/*
diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index f351296922ac..7ea05a7a717b 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -11228,3 +11228,8 @@ void sched_change_end(struct sched_change_ctx *ctx)
p->sched_class->prio_changed(rq, p, ctx->prio);
}
}
+
+#ifdef CONFIG_PARAVIRT
+struct cpumask __cpu_preferred_mask __read_mostly;
+EXPORT_SYMBOL(__cpu_preferred_mask);
+#endif
--
2.47.3