[RFC][PATCH 6/7] sched: Optimize SCHED_SMT
From: Peter Zijlstra
Date: Mon May 09 2016 - 06:57:36 EST
Avoid pointless SCHED_SMT code when running on !SMT hardware: introduce a
static key (sched_smt_present) that is enabled once at boot, after CPU
enumeration, only if any CPU actually has SMT siblings. The SMT-specific
paths in idle-core tracking and idle-sibling selection are then guarded by
static branches, so non-SMT machines pay (almost) nothing for them.
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/sched/core.c | 19 +++++++++++++++++++
kernel/sched/fair.c | 8 +++++++-
kernel/sched/idle_task.c | 2 --
kernel/sched/sched.h | 17 +++++++++++++++++
4 files changed, 43 insertions(+), 3 deletions(-)
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -7263,6 +7263,22 @@ int sched_cpu_dying(unsigned int cpu)
}
#endif
+#ifdef CONFIG_SCHED_SMT
+DEFINE_STATIC_KEY_FALSE(sched_smt_present);
+
+static void sched_init_smt(void)
+{
+ /*
+ * We've enumerated all CPUs and will assume that if any CPU
+ * has SMT siblings, CPU0 will too.
+ */
+ if (cpumask_weight(cpu_smt_mask(0)) > 1)
+ static_branch_enable(&sched_smt_present);
+}
+#else
+static inline void sched_init_smt(void) { }
+#endif
+
void __init sched_init_smp(void)
{
cpumask_var_t non_isolated_cpus;
@@ -7292,6 +7308,9 @@ void __init sched_init_smp(void)
init_sched_rt_class();
init_sched_dl_class();
+
+ sched_init_smt();
+
sched_smp_initialized = true;
}
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -5249,7 +5249,7 @@ static inline bool test_idle_cores(int c
* Since SMT siblings share all cache levels, inspecting this limited remote
* state should be fairly cheap.
*/
-void update_idle_core(struct rq *rq)
+void __update_idle_core(struct rq *rq)
{
int core = cpu_of(rq);
int cpu;
@@ -5281,6 +5281,9 @@ static int select_idle_core(struct task_
struct cpumask *cpus = this_cpu_cpumask_var_ptr(select_idle_mask);
int core, cpu, wrap;
+ if (!static_branch_likely(&sched_smt_present))
+ return -1;
+
if (!test_idle_cores(target, false))
return -1;
@@ -5314,6 +5317,9 @@ static int select_idle_smt(struct task_s
{
int cpu;
+ if (!static_branch_likely(&sched_smt_present))
+ return -1;
+
for_each_cpu(cpu, cpu_smt_mask(target)) {
if (!cpumask_test_cpu(cpu, tsk_cpus_allowed(p)))
continue;
--- a/kernel/sched/idle_task.c
+++ b/kernel/sched/idle_task.c
@@ -23,8 +23,6 @@ static void check_preempt_curr_idle(stru
resched_curr(rq);
}
-extern void update_idle_core(struct rq *rq);
-
static struct task_struct *
pick_next_task_idle(struct rq *rq, struct task_struct *prev, struct pin_cookie cookie)
{
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1811,3 +1811,20 @@ static inline void account_reset_rq(stru
rq->prev_steal_time_rq = 0;
#endif
}
+
+
+#ifdef CONFIG_SCHED_SMT
+
+extern struct static_key_false sched_smt_present;
+
+extern void __update_idle_core(struct rq *rq);
+
+static inline void update_idle_core(struct rq *rq)
+{
+ if (static_branch_unlikely(&sched_smt_present))
+ __update_idle_core(rq);
+}
+
+#else
+static inline void update_idle_core(struct rq *rq) { }
+#endif