[PATCH 3.16 42/86] sched: Add sched_smt_active()

From: Ben Hutchings
Date: Thu May 16 2019 - 12:04:48 EST


3.16.68-rc1 review patch. If anyone has any objections, please let me know.

------------------

From: Ben Hutchings <ben@xxxxxxxxxxxxxxx>

Add the sched_smt_active() function needed for some x86 speculation
mitigations. It was introduced upstream by commits 1b568f0aabf2
("sched/core: Optimize SCHED_SMT"), ba2591a5993e ("sched/smt: Update
sched_smt_present at runtime"), c5511d03ec09 ("sched/smt: Make
sched_smt_present track topology"), and 321a874a7ef8 ("sched/smt:
Expose sched_smt_present static key"). The upstream implementation
uses the static_key_{disable,enable}_cpuslocked() functions, which
aren't practical to backport, so this version tracks SMT state with a
plain atomic_t counter instead.
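
For context, a typical consumer of the new helper looks something like
the sketch below. The function is purely illustrative; it is not part
of this patch or of the upstream mitigation code:

	#include <linux/kernel.h>
	#include <linux/sched/smt.h>

	/* Hypothetical example: report whether cross-thread (sibling)
	 * attack surface exists on any currently online core. */
	static void example_update_mitigation(void)
	{
		if (sched_smt_active())
			pr_info("SMT active: cross-thread leaks possible\n");
		else
			pr_info("SMT inactive: sibling channels closed\n");
	}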

Signed-off-by: Ben Hutchings <ben@xxxxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Ingo Molnar <mingo@xxxxxxxxxx>
Cc: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Konrad Rzeszutek Wilk <konrad.wilk@xxxxxxxxxx>
---
include/linux/sched/smt.h | 18 ++++++++++++++++++
kernel/sched/core.c | 19 +++++++++++++++++++
kernel/sched/sched.h | 1 +
3 files changed, 38 insertions(+)
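
For comparison, the upstream helper added by 321a874a7ef8 is built on
a static key rather than an atomic counter; simplified, its shape is
roughly the following (quoted from memory of the commits named above,
not from this patch):

	/* upstream include/linux/sched/smt.h, simplified */
	extern struct static_key_false sched_smt_present;

	static __always_inline bool sched_smt_active(void)
	{
		return static_branch_likely(&sched_smt_present);
	}

The atomic_t used below gives the same true/false answer via a plain
atomic_read(), avoiding the cpuslocked static-key helpers that 3.16
lacks.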

--- /dev/null
+++ b/include/linux/sched/smt.h
@@ -0,0 +1,18 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+#ifndef _LINUX_SCHED_SMT_H
+#define _LINUX_SCHED_SMT_H
+
+#include <linux/atomic.h>
+
+#ifdef CONFIG_SCHED_SMT
+extern atomic_t sched_smt_present;
+
+static __always_inline bool sched_smt_active(void)
+{
+	return atomic_read(&sched_smt_present);
+}
+#else
+static inline bool sched_smt_active(void) { return false; }
+#endif
+
+#endif
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -5210,6 +5210,10 @@ static void __cpuinit set_cpu_rq_start_t
 	rq->age_stamp = sched_clock_cpu(cpu);
 }

+#ifdef CONFIG_SCHED_SMT
+atomic_t sched_smt_present = ATOMIC_INIT(0);
+#endif
+
 static int sched_cpu_active(struct notifier_block *nfb,
 				      unsigned long action, void *hcpu)
 {
@@ -5226,6 +5230,13 @@ static int sched_cpu_active(struct notif
 	 * Thus, fall-through and help the starting CPU along.
 	 */
 	case CPU_DOWN_FAILED:
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * When going up, increment the number of cores with SMT present.
+		 */
+		if (cpumask_weight(cpu_smt_mask((long)hcpu)) == 2)
+			atomic_inc(&sched_smt_present);
+#endif
 		set_cpu_active((long)hcpu, true);
 		return NOTIFY_OK;
 	default:
@@ -5243,6 +5254,14 @@ static int sched_cpu_inactive(struct not
 	case CPU_DOWN_PREPARE:
 		set_cpu_active(cpu, false);
 
+#ifdef CONFIG_SCHED_SMT
+		/*
+		 * When going down, decrement the number of cores with SMT present.
+		 */
+		if (cpumask_weight(cpu_smt_mask(cpu)) == 2)
+			atomic_dec(&sched_smt_present);
+#endif
+
 		/* explicitly allow suspend */
 		if (!(action & CPU_TASKS_FROZEN)) {
 			struct dl_bw *dl_b = dl_bw_of(cpu);
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -2,6 +2,7 @@
 #include <linux/sched.h>
 #include <linux/sched/sysctl.h>
 #include <linux/sched/rt.h>
+#include <linux/sched/smt.h>
 #include <linux/sched/deadline.h>
 #include <linux/mutex.h>
 #include <linux/spinlock.h>
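
A closing note on the counting scheme above, for reviewers:
cpu_smt_mask() covers the online siblings of a core, and an x86 core
has at most two SMT threads, so comparing the mask weight against 2
catches exactly the transitions that matter. An illustrative
walk-through for one core with siblings {0, 4} (hypothetical CPU
numbers):

	CPU 0 onlines  -> weight == 1 -> counter untouched
	CPU 4 onlines  -> weight == 2 -> atomic_inc() (SMT now present)
	CPU 4 offlines -> weight == 2 -> atomic_dec() (the mask still
	                                 holds CPU 4 at DOWN_PREPARE time)
	CPU 0 offlines -> weight == 1 -> counter untouched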