[PATCH 4/6] x86/bugs: Use a static branch to guard IBPB on vCPU load
From: Yosry Ahmed
Date: Wed Feb 19 2025 - 17:09:54 EST
Instead of using X86_FEATURE_USE_IBPB to guard IBPB execution in the
vCPU load path, introduce a static branch, similar to switch_mm_*_ibpb.
This makes it obvious in spectre_v2_user_select_mitigation() exactly
what is being toggled, instead of the unclear X86_FEATURE_USE_IBPB
(which will be removed shortly). It also provides more fine-grained
control, making it simpler to change or add conditions that control
IBPB in the vCPU load path without affecting other IBPB call sites.
Signed-off-by: Yosry Ahmed <yosry.ahmed@xxxxxxxxx>
---
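Note for reviewers (not part of the commit message): below is a minimal
sketch of the static-key pattern this patch follows, with made-up demo_*
names standing in for the real identifiers. It only illustrates where the
declaration, definition/enable, and check site live; the actual wiring is
in the hunks that follow.

#include <linux/init.h>
#include <linux/jump_label.h>
#include <asm/cpufeature.h>
#include <asm/nospec-branch.h>

/* Header: lets any translation unit (including modules) test the key. */
DECLARE_STATIC_KEY_FALSE(demo_ibpb);

/* One .c file owns the definition; the key starts out disabled. */
DEFINE_STATIC_KEY_FALSE(demo_ibpb);
EXPORT_SYMBOL_GPL(demo_ibpb);	/* needed if a module (e.g. kvm.ko) tests the key */

static void __init demo_select_mitigation(void)
{
	/* Flipped once at boot; the branch site is patched in place. */
	if (boot_cpu_has(X86_FEATURE_IBPB))
		static_branch_enable(&demo_ibpb);
}

static void demo_vcpu_load(void)
{
	/*
	 * static_branch_likely() compiles down to a patched jmp/nop rather
	 * than a memory load plus test, so the disabled case costs
	 * essentially nothing on this hot path.
	 */
	if (static_branch_likely(&demo_ibpb))
		indirect_branch_prediction_barrier();
}

The hunks below wire the same three pieces into nospec-branch.h, bugs.c,
and the SVM/VMX vCPU load paths.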
arch/x86/include/asm/nospec-branch.h | 2 ++
arch/x86/kernel/cpu/bugs.c | 5 +++++
arch/x86/kvm/svm/svm.c | 2 +-
arch/x86/kvm/vmx/vmx.c | 2 +-
4 files changed, 9 insertions(+), 2 deletions(-)
diff --git a/arch/x86/include/asm/nospec-branch.h b/arch/x86/include/asm/nospec-branch.h
index 7cbb76a2434b9..a22836c5fb338 100644
--- a/arch/x86/include/asm/nospec-branch.h
+++ b/arch/x86/include/asm/nospec-branch.h
@@ -552,6 +552,8 @@ DECLARE_STATIC_KEY_FALSE(switch_to_cond_stibp);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
DECLARE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+DECLARE_STATIC_KEY_FALSE(vcpu_load_ibpb);
+
DECLARE_STATIC_KEY_FALSE(mds_idle_clear);
DECLARE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index a5d0998d76049..685a6f97fea8f 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -113,6 +113,10 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_ibpb);
/* Control unconditional IBPB in switch_mm() */
DEFINE_STATIC_KEY_FALSE(switch_mm_always_ibpb);
+/* Control IBPB on vCPU load */
+DEFINE_STATIC_KEY_FALSE(vcpu_load_ibpb);
+EXPORT_SYMBOL_GPL(vcpu_load_ibpb);
+
/* Control MDS CPU buffer clear before idling (halt, mwait) */
DEFINE_STATIC_KEY_FALSE(mds_idle_clear);
EXPORT_SYMBOL_GPL(mds_idle_clear);
@@ -1365,6 +1369,7 @@ spectre_v2_user_select_mitigation(void)
/* Initialize Indirect Branch Prediction Barrier */
if (boot_cpu_has(X86_FEATURE_IBPB)) {
setup_force_cpu_cap(X86_FEATURE_USE_IBPB);
+ static_branch_enable(&vcpu_load_ibpb);
spectre_v2_user_ibpb = mode;
switch (cmd) {
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a4ba5b4e3d682..043d56d276ad6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -1560,7 +1560,7 @@ static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
sd->current_vmcb = svm->vmcb;
if (!cpu_feature_enabled(X86_FEATURE_IBPB_ON_VMEXIT) &&
- cpu_feature_enabled(X86_FEATURE_USE_IBPB))
+ static_branch_likely(&vcpu_load_ibpb))
indirect_branch_prediction_barrier();
}
if (kvm_vcpu_apicv_active(vcpu))
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 729a8ee24037b..7f950d0b50757 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -1478,7 +1478,7 @@ void vmx_vcpu_load_vmcs(struct kvm_vcpu *vcpu, int cpu,
* may switch the active VMCS multiple times).
*/
if (!buddy || WARN_ON_ONCE(buddy->vmcs != prev))
- if (cpu_feature_enabled(X86_FEATURE_USE_IBPB))
+ if (static_branch_likely(&vcpu_load_ibpb))
indirect_branch_prediction_barrier();
}
--
2.48.1.601.g30ceb7b040-goog