[PATCH 4.4 199/266] x86/speculation: Enable cross-hyperthread spectre v2 STIBP mitigation

From: Greg Kroah-Hartman
Date: Wed May 15 2019 - 08:08:02 EST


From: Jiri Kosina <jkosina@xxxxxxx>

commit 53c613fe6349994f023245519265999eed75957f upstream.

STIBP (Single Thread Indirect Branch Predictors) is a feature provided by
certain Intel ucodes / CPUs. This feature (once enabled) prevents
cross-hyperthread control of decisions made by indirect branch predictors.

Enable this feature if

- the CPU is vulnerable to spectre v2
- the CPU supports SMT and has SMT siblings online
- spectre_v2 mitigation autoselection is enabled (default)

After some previous discussion, this leaves STIBP on all the time, as
issuing a wrmsr on every kernel boundary crossing is a no-no. This could
perhaps be optimized a bit later (like disabling it in NOHZ, experimenting
with disabling it in idle, etc.) if needed.

Note that the synchronization of the mask manipulation via newly added
spec_ctrl_mutex is currently not strictly needed, as the only updater is
already being serialized by cpu_add_remove_lock, but let's make this a
little bit more future-proof.
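
With the cpu_show_common() hunk below, the new state is user-visible in
sysfs: when the STIBP bit is set in x86_spec_ctrl_base, the
/sys/devices/system/cpu/vulnerabilities/spectre_v2 file gains a ", STIBP"
suffix, e.g. "Mitigation: Full generic retpoline, IBPB, STIBP" (hypothetical
output; the exact mitigation string depends on the system).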

Signed-off-by: Jiri Kosina <jkosina@xxxxxxx>
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Josh Poimboeuf <jpoimboe@xxxxxxxxxx>
Cc: Andrea Arcangeli <aarcange@xxxxxxxxxx>
Cc: "WoodhouseDavid" <dwmw@xxxxxxxxxxxx>
Cc: Andi Kleen <ak@xxxxxxxxxxxxxxx>
Cc: Tim Chen <tim.c.chen@xxxxxxxxxxxxxxx>
Cc: "SchauflerCasey" <casey.schaufler@xxxxxxxxx>
Link: https://lkml.kernel.org/r/nycvar.YFH.7.76.1809251438240.15880@xxxxxxxxxxxxx
[bwh: Backported to 4.4:
- Don't add any calls to arch_smt_update() yet. They will be introduced by
"x86/speculation: Rework SMT state change".
- Use IS_ENABLED(CONFIG_SMP) instead of cpu_smt_control for now. This
will be fixed by "x86/speculation: Rework SMT state change".]
Signed-off-by: Ben Hutchings <ben@xxxxxxxxxxxxxxx>
Signed-off-by: Greg Kroah-Hartman <gregkh@xxxxxxxxxxxxxxxxxxx>
---
arch/x86/kernel/cpu/bugs.c | 55 ++++++++++++++++++++++++++++++++++++++++-----
1 file changed, 50 insertions(+), 5 deletions(-)

--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -32,12 +32,10 @@ static void __init spectre_v2_select_mit
static void __init ssb_select_mitigation(void);
static void __init l1tf_select_mitigation(void);

-/*
- * Our boot-time value of the SPEC_CTRL MSR. We read it once so that any
- * writes to SPEC_CTRL contain whatever reserved bits have been set.
- */
+/* The base value of the SPEC_CTRL MSR that always has to be preserved. */
u64 x86_spec_ctrl_base;
EXPORT_SYMBOL_GPL(x86_spec_ctrl_base);
+static DEFINE_MUTEX(spec_ctrl_mutex);

/*
* The vendor and possibly platform specific bits which can be modified in
@@ -315,6 +313,46 @@ static enum spectre_v2_mitigation_cmd __
return cmd;
}

+static bool stibp_needed(void)
+{
+ if (spectre_v2_enabled == SPECTRE_V2_NONE)
+ return false;
+
+ if (!boot_cpu_has(X86_FEATURE_STIBP))
+ return false;
+
+ return true;
+}
+
+static void update_stibp_msr(void *info)
+{
+ wrmsrl(MSR_IA32_SPEC_CTRL, x86_spec_ctrl_base);
+}
+
+void arch_smt_update(void)
+{
+ u64 mask;
+
+ if (!stibp_needed())
+ return;
+
+ mutex_lock(&spec_ctrl_mutex);
+ mask = x86_spec_ctrl_base;
+ if (IS_ENABLED(CONFIG_SMP))
+ mask |= SPEC_CTRL_STIBP;
+ else
+ mask &= ~SPEC_CTRL_STIBP;
+
+ if (mask != x86_spec_ctrl_base) {
+ pr_info("Spectre v2 cross-process SMT mitigation: %s STIBP\n",
+ IS_ENABLED(CONFIG_SMP) ?
+ "Enabling" : "Disabling");
+ x86_spec_ctrl_base = mask;
+ on_each_cpu(update_stibp_msr, NULL, 1);
+ }
+ mutex_unlock(&spec_ctrl_mutex);
+}
+
static void __init spectre_v2_select_mitigation(void)
{
enum spectre_v2_mitigation_cmd cmd = spectre_v2_parse_cmdline();
@@ -414,6 +452,9 @@ specv2_set_mode:
setup_force_cpu_cap(X86_FEATURE_USE_IBRS_FW);
pr_info("Enabling Restricted Speculation for firmware calls\n");
}
+
+ /* Enable STIBP if appropriate */
+ arch_smt_update();
}

#undef pr_fmt
@@ -722,6 +763,8 @@ static void __init l1tf_select_mitigatio
static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
char *buf, unsigned int bug)
{
+ int ret;
+
if (!boot_cpu_has_bug(bug))
return sprintf(buf, "Not affected\n");

@@ -736,10 +779,12 @@ static ssize_t cpu_show_common(struct de
return sprintf(buf, "Mitigation: __user pointer sanitization\n");

case X86_BUG_SPECTRE_V2:
- return sprintf(buf, "%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
+ ret = sprintf(buf, "%s%s%s%s%s\n", spectre_v2_strings[spectre_v2_enabled],
boot_cpu_has(X86_FEATURE_USE_IBPB) ? ", IBPB" : "",
boot_cpu_has(X86_FEATURE_USE_IBRS_FW) ? ", IBRS_FW" : "",
+ (x86_spec_ctrl_base & SPEC_CTRL_STIBP) ? ", STIBP" : "",
spectre_v2_module_string());
+ return ret;

case X86_BUG_SPEC_STORE_BYPASS:
return sprintf(buf, "%s\n", ssb_strings[ssb_mode]);