Re: [PATCH v9 07/10] x86/vmscape: Use static_call() for predictor flush
From: Sean Christopherson
Date: Fri Apr 03 2026 - 10:53:34 EST
On Thu, Apr 02, 2026, Pawan Gupta wrote:
> Adding more mitigation options at exit-to-userspace for VMSCAPE would
> usually require a series of checks to decide which mitigation to use. In
> this case, the mitigation is performed by calling a function that is
> selected at boot. Adding more feature flags and multiple checks can
> therefore be avoided by invoking the mitigating function through a
> static_call().
>
> Replace the flag-based mitigation selector with a static_call(). This also
> frees the existing X86_FEATURE_IBPB_EXIT_TO_USER.
...
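
(For reference, the static_call() pattern being described boils down to
the sketch below; the definition and update sites mirror the patch, while
the exit-to-userspace helper and its name are illustrative only.)

#include <linux/init.h>
#include <linux/percpu.h>
#include <linux/static_call.h>
#include <asm/nospec-branch.h>	/* write_ibpb() */

DECLARE_PER_CPU(bool, x86_predictor_flush_exit_to_user);

/* Defined NULL: the trampoline is a bare RET until boot patches it. */
DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);

static void __init vmscape_select_flush_example(void)
{
	/* Boot-time selection, as in the vmscape_apply_mitigation() hunk below. */
	static_call_update(vmscape_predictor_flush, write_ibpb);
}

static __always_inline void vmscape_flush_on_exit_to_user(void)
{
	/*
	 * Hypothetical exit-to-userspace caller, keyed off the per-CPU
	 * flag that vcpu_enter_guest() sets; a call through the still
	 * NULL static call just hits the RET trampoline, i.e. is ~free.
	 */
	if (this_cpu_read(x86_predictor_flush_exit_to_user)) {
		static_call(vmscape_predictor_flush)();
		this_cpu_write(x86_predictor_flush_exit_to_user, false);
	}
}
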
> @@ -3133,8 +3139,14 @@ static void __init vmscape_update_mitigation(void)
> static void __init vmscape_apply_mitigation(void)
> {
> if (vmscape_mitigation == VMSCAPE_MITIGATION_IBPB_EXIT_TO_USER)
> - setup_force_cpu_cap(X86_FEATURE_IBPB_EXIT_TO_USER);
> + static_call_update(vmscape_predictor_flush, write_ibpb);
> +}
> +
> +bool vmscape_mitigation_enabled(void)
> +{
> + return !!static_call_query(vmscape_predictor_flush);
> }
> +EXPORT_SYMBOL_FOR_KVM(vmscape_mitigation_enabled);
>
> #undef pr_fmt
> #define pr_fmt(fmt) fmt
> diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
> index 45d7cfedc507..e204482e64f3 100644
> --- a/arch/x86/kvm/x86.c
> +++ b/arch/x86/kvm/x86.c
> @@ -11463,7 +11463,7 @@ static int vcpu_enter_guest(struct kvm_vcpu *vcpu)
> * set for the CPU that actually ran the guest, and not the CPU that it
> * may migrate to.
> */
> - if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
> + if (vmscape_mitigation_enabled())
This is pretty lame. It turns a statically patched MOV
11548 if (cpu_feature_enabled(X86_FEATURE_IBPB_EXIT_TO_USER))
11549 this_cpu_write(x86_ibpb_exit_to_user, true);
0x000000000003c57a <+858>: movb $0x1,%gs:0x0(%rip) # 0x3c582 <vcpu_enter_guest+866>
into a function call and two sets of conditional branches. And with mitigations
enabled, that function call may trigger the wonderful unret insanity:
11548 if (vmscape_mitigation_enabled())
0x000000000003c575 <+853>: call 0x3c57a <vcpu_enter_guest+858>
0x000000000003c57a <+858>: test %al,%al
0x000000000003c57c <+860>: je 0x3c586 <vcpu_enter_guest+870>
11549 this_cpu_write(x86_predictor_flush_exit_to_user, true);
0x000000000003c57e <+862>: movb $0x1,%gs:0x0(%rip) # 0x3c586 <vcpu_enter_guest+870>

where vmscape_mitigation_enabled() itself disassembles to:

3166 {
0xffffffff81285320 <+0>: endbr64
0xffffffff81285324 <+4>: call 0xffffffff812aa5a0 <__fentry__>
3167 return !!static_call_query(vmscape_predictor_flush);
0xffffffff81285329 <+9>: mov 0x13a4f30(%rip),%rax # 0xffffffff8262a260 <__SCK__vmscape_predictor_flush>
0xffffffff81285330 <+16>: test %rax,%rax
0xffffffff81285333 <+19>: setne %al
3168 }
0xffffffff81285336 <+22>: jmp 0xffffffff81db1e30 <__x86_return_thunk>
While this isn't KVM's super hot inner run loop, it's still very much a hot path.
Even more annoying, KVM will eat the function call on kernels with CPU_MITIGATIONS=n.
I'd like to at least do something like the below to make the common case of
multiple guest entry/exits more or less free, and to avoid the CALL+(UN)RET
overhead, but trying to include linux/static_call.h in processor.h (or any other
core x86 header) creates a circular dependency :-/
diff --git a/arch/x86/include/asm/processor.h b/arch/x86/include/asm/processor.h
index 20ab4dd588c6..0dc0680a80f8 100644
--- a/arch/x86/include/asm/processor.h
+++ b/arch/x86/include/asm/processor.h
@@ -36,6 +36,7 @@ struct vm86;
#include <linux/err.h>
#include <linux/irqflags.h>
#include <linux/mem_encrypt.h>
+#include <linux/static_call.h>
/*
* We handle most unaligned accesses in hardware. On the other hand
@@ -753,7 +754,11 @@ enum mds_mitigations {
};
extern bool gds_ucode_mitigated(void);
-extern bool vmscape_mitigation_enabled(void);
+
+static inline bool vmscape_mitigation_enabled(void)
+{
+ return !!static_call_query(vmscape_predictor_flush);
+}
/*
* Make previous memory operations globally visible before
diff --git a/arch/x86/kernel/cpu/bugs.c b/arch/x86/kernel/cpu/bugs.c
index 366ebe1e1fb9..02bf626f0773 100644
--- a/arch/x86/kernel/cpu/bugs.c
+++ b/arch/x86/kernel/cpu/bugs.c
@@ -148,6 +148,7 @@ DEFINE_STATIC_KEY_FALSE(switch_mm_cond_l1d_flush);
* sequence. This defaults to no mitigation.
*/
DEFINE_STATIC_CALL_NULL(vmscape_predictor_flush, write_ibpb);
+EXPORT_STATIC_CALL_GPL(vmscape_predictor_flush);
#undef pr_fmt
#define pr_fmt(fmt) "mitigations: " fmt
@@ -3162,12 +3163,6 @@ static void __init vmscape_apply_mitigation(void)
static_call_update(vmscape_predictor_flush, clear_bhb_loop_nofence);
}
-bool vmscape_mitigation_enabled(void)
-{
- return !!static_call_query(vmscape_predictor_flush);
-}
-EXPORT_SYMBOL_FOR_KVM(vmscape_mitigation_enabled);
-
#undef pr_fmt
#define pr_fmt(fmt) fmt
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index a1fbbab08291..117c60d00758 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -11545,7 +11545,9 @@ static noinline int vcpu_enter_guest(struct kvm_vcpu *vcpu)
* set for the CPU that actually ran the guest, and not the CPU that it
* may migrate to.
*/
- if (vmscape_mitigation_enabled())
+ if (IS_ENABLED(CONFIG_CPU_MITIGATIONS) &&
+ !this_cpu_read(x86_predictor_flush_exit_to_user) &&
+ vmscape_mitigation_enabled())
this_cpu_write(x86_predictor_flush_exit_to_user, true);
/*
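
FWIW, one way to sidestep the include cycle might be a dedicated leaf
header that only KVM pulls in, so processor.h never has to see
linux/static_call.h at all. A sketch (file name and placement are
hypothetical, untested), relying on the EXPORT_STATIC_CALL_GPL() added
above:

/* arch/x86/include/asm/vmscape.h */
#ifndef _ASM_X86_VMSCAPE_H
#define _ASM_X86_VMSCAPE_H

#include <linux/static_call.h>
#include <asm/nospec-branch.h>	/* write_ibpb() */

DECLARE_STATIC_CALL(vmscape_predictor_flush, write_ibpb);

static __always_inline bool vmscape_mitigation_enabled(void)
{
	/* NULL until vmscape_apply_mitigation() installs a flusher. */
	return !!static_call_query(vmscape_predictor_flush);
}

#endif /* _ASM_X86_VMSCAPE_H */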