Re: [PATCH] x86/srso: Correct the mitigation status when SMT is disabled
From: Josh Poimboeuf
Date: Tue Aug 15 2023 - 15:59:52 EST
On Tue, Aug 15, 2023 at 11:57:24AM +0200, Borislav Petkov wrote:
> --- a/arch/x86/kernel/cpu/bugs.c
> +++ b/arch/x86/kernel/cpu/bugs.c
> @@ -2417,8 +2417,7 @@ static void __init srso_select_mitigation(void)
>  		 * Zen1/2 with SMT off aren't vulnerable after the right
>  		 * IBPB microcode has been applied.
>  		 */
> -		if ((boot_cpu_data.x86 < 0x19) &&
> -		    (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) {
> +		if (boot_cpu_data.x86 < 0x19 && !cpu_smt_possible()) {
>  			setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
>  			return;
>  		}
> @@ -2698,8 +2697,12 @@ static ssize_t retbleed_show_state(char *buf)
>
>  static ssize_t srso_show_state(char *buf)
>  {
> -	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
> -		return sysfs_emit(buf, "Not affected\n");
> +	if (boot_cpu_has(X86_FEATURE_SRSO_NO)) {
> +		if (sched_smt_active())
> +			return sysfs_emit(buf, "Not affected\n");
> +		else
> +			return sysfs_emit(buf, "Mitigation: SMT disabled\n");
> +	}
AFAICT, nowhere in the spec does it say the SRSO_NO bit won't get set by
future (fixed) HW. In fact I'd expect it will, similar to other *_NO
flags.
Regardless, here SRSO_NO seems to mean two different things: "reported
safe by the host (or HW)" and "not reported safe, but Zen1/2 with SMT
not possible".
Also, in this code, the SRSO_NO+SMT combo doesn't seem logically
possible, as srso_show_state() only gets called if X86_BUG_SRSO is set,
which only happens if SRSO_NO is not set by the HW/host in the first
place. So here, if boot_cpu_has(X86_FEATURE_SRSO_NO), it means SRSO_NO
was manually set by srso_select_mitigation(), and SMT can't possibly be
enabled.
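
To spell that out, the sysfs path looks roughly like this (heavily
abridged from bugs.c, quoting from memory, so the exact lines may
differ):

static ssize_t cpu_show_common(struct device *dev, struct device_attribute *attr,
			       char *buf, unsigned int bug)
{
	/* Bails out early when the bug bit isn't set... */
	if (!boot_cpu_has_bug(bug))
		return sysfs_emit(buf, "Not affected\n");

	/* ...so srso_show_state() only runs with X86_BUG_SRSO set. */
	switch (bug) {
	/* ... other bugs elided ... */
	case X86_BUG_SRSO:
		return srso_show_state(buf);
	default:
		break;
	}

	return sysfs_emit(buf, "Vulnerable\n");	/* fallback, paraphrased */
}

ssize_t cpu_show_spec_rstack_overflow(struct device *dev,
				      struct device_attribute *attr, char *buf)
{
	return cpu_show_common(dev, attr, buf, X86_BUG_SRSO);
}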
Instead of piggybacking on SRSO_NO, which is confusing, why not just add
a new mitigation type, like:
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 6c04aef4b63b..c925b98f5a15 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -2343,6 +2343,7 @@ early_param("l1tf", l1tf_cmdline);
 enum srso_mitigation {
 	SRSO_MITIGATION_NONE,
 	SRSO_MITIGATION_MICROCODE,
+	SRSO_MITIGATION_SMT,
 	SRSO_MITIGATION_SAFE_RET,
 	SRSO_MITIGATION_IBPB,
 	SRSO_MITIGATION_IBPB_ON_VMEXIT,
@@ -2359,6 +2360,7 @@ enum srso_mitigation_cmd {
 static const char * const srso_strings[] = {
 	[SRSO_MITIGATION_NONE] = "Vulnerable",
 	[SRSO_MITIGATION_MICROCODE] = "Mitigation: microcode",
+	[SRSO_MITIGATION_SMT] = "Mitigation: SMT disabled",
 	[SRSO_MITIGATION_SAFE_RET] = "Mitigation: safe RET",
 	[SRSO_MITIGATION_IBPB] = "Mitigation: IBPB",
 	[SRSO_MITIGATION_IBPB_ON_VMEXIT] = "Mitigation: IBPB on VMEXIT only"
@@ -2407,19 +2409,15 @@ static void __init srso_select_mitigation(void)
pr_warn("IBPB-extending microcode not applied!\n");
pr_warn(SRSO_NOTICE);
} else {
- /*
- * Enable the synthetic (even if in a real CPUID leaf)
- * flags for guests.
- */
+ /* Enable the synthetic flag, as HW doesn't set it. */
setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
/*
* Zen1/2 with SMT off aren't vulnerable after the right
* IBPB microcode has been applied.
*/
- if ((boot_cpu_data.x86 < 0x19) &&
- (!cpu_smt_possible() || (cpu_smt_control == CPU_SMT_DISABLED))) {
- setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+ if ((boot_cpu_data.x86 < 0x19) && !cpu_smt_possible()) {
+ srso_mitigation = SRSO_MITIGATION_SMT;
return;
}
}
@@ -2698,9 +2696,6 @@ static ssize_t retbleed_show_state(char *buf)
 static ssize_t srso_show_state(char *buf)
 {
-	if (boot_cpu_has(X86_FEATURE_SRSO_NO))
-		return sysfs_emit(buf, "Not affected\n");
-
 	return sysfs_emit(buf, "%s%s\n",
 			  srso_strings[srso_mitigation],
 			  (cpu_has_ibpb_brtype_microcode() ? "" : ", no microcode"));
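
With that, a Zen1/2 machine where SMT can't be enabled (and which has
the IBPB-extending microcode) should then report something like:

  $ cat /sys/devices/system/cpu/vulnerabilities/spec_rstack_overflow
  Mitigation: SMT disabled

(hypothetical output, just reading srso_strings[] plus the microcode
suffix above).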