[RFC][PATCH 06/17] x86/cpu: Add SRSO untrain to retbleed=
From: Peter Zijlstra
Date: Wed Aug 09 2023 - 03:27:21 EST
Since it is now readily apparent that the two SRSO
untrain_ret+return_thunk variants are exactly the same mechanism as
the existing (retbleed) zen untrain_ret+return_thunk, add them to the
existing retbleed options.
This avoids all confusion as to which of the three -- if any -- ought
to be active: there is a single point of control and no funny
interactions.
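Concretely, all three untrain thunks are now selected through the one
retbleed= parameter; srso and srso_alias are the new values, unret the
existing one (per the auto-selection below, srso_alias is what family
0x19 parts get, srso the earlier affected parts):

  retbleed=unret        # existing BTB untrain return thunk
  retbleed=srso         # SRSO untrain return thunk
  retbleed=srso_alias   # SRSO alias untrain return thunk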
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
arch/x86/kernel/cpu/bugs.c | 87 +++++++++++++++++++++++++++++++++++++++------
1 file changed, 76 insertions(+), 11 deletions(-)
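With this applied, the chosen variant should be visible through the
existing retbleed sysfs file rather than a separate srso one; e.g. a
Zen1/2 machine booted with retbleed=srso would report (illustrative
output, string taken from retbleed_strings below):

  $ cat /sys/devices/system/cpu/vulnerabilities/retbleed
  Mitigation: srso untrained return thunk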
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -748,6 +748,8 @@ enum spectre_v2_mitigation spectre_v2_en
enum retbleed_mitigation {
RETBLEED_MITIGATION_NONE,
RETBLEED_MITIGATION_UNRET,
+ RETBLEED_MITIGATION_UNRET_SRSO,
+ RETBLEED_MITIGATION_UNRET_SRSO_ALIAS,
RETBLEED_MITIGATION_IBPB,
RETBLEED_MITIGATION_IBRS,
RETBLEED_MITIGATION_EIBRS,
@@ -758,17 +760,21 @@ enum retbleed_mitigation_cmd {
RETBLEED_CMD_OFF,
RETBLEED_CMD_AUTO,
RETBLEED_CMD_UNRET,
+ RETBLEED_CMD_UNRET_SRSO,
+ RETBLEED_CMD_UNRET_SRSO_ALIAS,
RETBLEED_CMD_IBPB,
RETBLEED_CMD_STUFF,
};
static const char * const retbleed_strings[] = {
- [RETBLEED_MITIGATION_NONE] = "Vulnerable",
- [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
- [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
- [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
- [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
- [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
+ [RETBLEED_MITIGATION_NONE] = "Vulnerable",
+ [RETBLEED_MITIGATION_UNRET] = "Mitigation: untrained return thunk",
+ [RETBLEED_MITIGATION_UNRET_SRSO] = "Mitigation: srso untrained return thunk",
+ [RETBLEED_MITIGATION_UNRET_SRSO_ALIAS] = "Mitigation: srso alias untrained return thunk",
+ [RETBLEED_MITIGATION_IBPB] = "Mitigation: IBPB",
+ [RETBLEED_MITIGATION_IBRS] = "Mitigation: IBRS",
+ [RETBLEED_MITIGATION_EIBRS] = "Mitigation: Enhanced IBRS",
+ [RETBLEED_MITIGATION_STUFF] = "Mitigation: Stuffing",
};
static enum retbleed_mitigation retbleed_mitigation __ro_after_init =
@@ -796,6 +802,10 @@ static int __init retbleed_parse_cmdline
retbleed_cmd = RETBLEED_CMD_AUTO;
} else if (!strcmp(str, "unret")) {
retbleed_cmd = RETBLEED_CMD_UNRET;
+ } else if (!strcmp(str, "srso")) {
+ retbleed_cmd = RETBLEED_CMD_UNRET_SRSO;
+ } else if (!strcmp(str, "srso_alias")) {
+ retbleed_cmd = RETBLEED_CMD_UNRET_SRSO_ALIAS;
} else if (!strcmp(str, "ibpb")) {
retbleed_cmd = RETBLEED_CMD_IBPB;
} else if (!strcmp(str, "stuff")) {
@@ -817,21 +827,54 @@ early_param("retbleed", retbleed_parse_c
#define RETBLEED_UNTRAIN_MSG "WARNING: BTB untrained return thunk mitigation is only effective on AMD/Hygon!\n"
#define RETBLEED_INTEL_MSG "WARNING: Spectre v2 mitigation leaves CPU vulnerable to RETBleed attacks, data leaks possible!\n"
+#define RETBLEED_SRSO_NOTICE "WARNING: See https://kernel.org/doc/html/latest/admin-guide/hw-vuln/srso.html for mitigation options."
static void __init retbleed_select_mitigation(void)
{
bool mitigate_smt = false;
+ bool has_microcode = false;
- if (!boot_cpu_has_bug(X86_BUG_RETBLEED) || cpu_mitigations_off())
+ if ((!boot_cpu_has_bug(X86_BUG_RETBLEED) && !boot_cpu_has_bug(X86_BUG_SRSO)) ||
+ cpu_mitigations_off())
return;
+ if (boot_cpu_has_bug(X86_BUG_SRSO)) {
+ has_microcode = boot_cpu_has(X86_FEATURE_IBPB_BRTYPE) || cpu_has_ibpb_brtype_microcode();
+ if (!has_microcode) {
+ pr_warn("IBPB-extending microcode not applied!\n");
+ pr_warn(RETBLEED_SRSO_NOTICE);
+ } else {
+ /*
+ * Enable the synthetic (even if in a real CPUID leaf)
+ * flags for guests.
+ */
+ setup_force_cpu_cap(X86_FEATURE_IBPB_BRTYPE);
+ setup_force_cpu_cap(X86_FEATURE_SBPB);
+
+ /*
+ * Zen1/2 with SMT off aren't vulnerable after the right
+ * IBPB microcode has been applied.
+ */
+ if ((boot_cpu_data.x86 < 0x19) &&
+ (cpu_smt_control == CPU_SMT_DISABLED))
+ setup_force_cpu_cap(X86_FEATURE_SRSO_NO);
+ }
+ }
+
switch (retbleed_cmd) {
case RETBLEED_CMD_OFF:
return;
case RETBLEED_CMD_UNRET:
+ case RETBLEED_CMD_UNRET_SRSO:
+ case RETBLEED_CMD_UNRET_SRSO_ALIAS:
if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ if (retbleed_cmd == RETBLEED_CMD_UNRET)
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ if (retbleed_cmd == RETBLEED_CMD_UNRET_SRSO)
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO;
+ if (retbleed_cmd == RETBLEED_CMD_UNRET_SRSO_ALIAS)
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO_ALIAS;
} else {
pr_err("WARNING: kernel not compiled with CPU_UNRET_ENTRY.\n");
goto do_cmd_auto;
@@ -843,6 +886,8 @@ static void __init retbleed_select_mitig
pr_err("WARNING: CPU does not support IBPB.\n");
goto do_cmd_auto;
} else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY)) {
+ if (boot_cpu_has_bug(X86_BUG_SRSO) && !has_microcode)
+ pr_err("IBPB-extending microcode not applied; SRSO NOT mitigated\n");
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
} else {
pr_err("WARNING: kernel not compiled with CPU_IBPB_ENTRY.\n");
@@ -870,8 +915,17 @@ static void __init retbleed_select_mitig
default:
if (boot_cpu_data.x86_vendor == X86_VENDOR_AMD ||
boot_cpu_data.x86_vendor == X86_VENDOR_HYGON) {
- if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY))
- retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+ if (IS_ENABLED(CONFIG_CPU_UNRET_ENTRY)) {
+ if (boot_cpu_has_bug(X86_BUG_RETBLEED))
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET;
+
+ if (boot_cpu_has_bug(X86_BUG_SRSO) && !boot_cpu_has(X86_FEATURE_SRSO_NO)) {
+ if (boot_cpu_data.x86 == 0x19)
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO_ALIAS;
+ else
+ retbleed_mitigation = RETBLEED_MITIGATION_UNRET_SRSO;
+ }
+ }
else if (IS_ENABLED(CONFIG_CPU_IBPB_ENTRY) && boot_cpu_has(X86_FEATURE_IBPB))
retbleed_mitigation = RETBLEED_MITIGATION_IBPB;
}
@@ -886,9 +940,20 @@ static void __init retbleed_select_mitig
}
switch (retbleed_mitigation) {
+ case RETBLEED_MITIGATION_UNRET_SRSO_ALIAS:
+ setup_force_cpu_cap(X86_FEATURE_SRSO_ALIAS);
+ x86_return_thunk = srso_alias_return_thunk;
+ goto do_rethunk;
+
+ case RETBLEED_MITIGATION_UNRET_SRSO:
+ setup_force_cpu_cap(X86_FEATURE_SRSO);
+ x86_return_thunk = srso_return_thunk;
+ goto do_rethunk;
+
case RETBLEED_MITIGATION_UNRET:
- setup_force_cpu_cap(X86_FEATURE_RETHUNK);
setup_force_cpu_cap(X86_FEATURE_UNRET);
+do_rethunk:
+ setup_force_cpu_cap(X86_FEATURE_RETHUNK);
if (boot_cpu_data.x86_vendor != X86_VENDOR_AMD &&
boot_cpu_data.x86_vendor != X86_VENDOR_HYGON)