[PATCH] x86: kvm: Initialize static calls before SMP boot

From: David Sauerwein

Date: Wed Mar 04 2026 - 11:55:37 EST


Updating static calls is expensive on wide SMP systems because all
online CPUs need to act in a coordinated manner for code patching to
work as expected.

Static calls are initialized only after SMP boot, where the code patching
overhead is noticeable. Pre-initialize the majority of these static
calls before SMP boot to get rid of that overhead.

The pre-initialization most likely already sets the correct value. To
still handle any potential differences, make the static call
initialization re-entrant so that the differences are corrected during
post-SMP-boot initialization. Values that are already set correctly are
skipped.

This patch was tested on a 6th Gen Intel Xeon Platinum 8375C CPU with
128 SMT cores. When comparing before and after we see that 85% less time
is spent in kvm_ops_update (29.5ms -> 4.2ms).

Co-developed-by: Jan H. Schönherr <jschoenh@xxxxxxxxx>
Signed-off-by: Jan H. Schönherr <jschoenh@xxxxxxxxx>
Signed-off-by: David Sauerwein <dssauerw@xxxxxxxxx>
---
arch/x86/include/asm/kvm_host.h | 1 +
arch/x86/kvm/svm/svm.c | 11 +++++++++++
arch/x86/kvm/vmx/vmx.c | 18 ++++++++++++++++++
arch/x86/kvm/x86.c | 19 ++++++++++++++++++-
kernel/events/core.c | 6 +++++-
5 files changed, 53 insertions(+), 2 deletions(-)

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 48598d017d6f..18072d7bed36 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -2000,6 +2000,7 @@ extern struct kvm_x86_ops kvm_x86_ops;
#define KVM_X86_OP_OPTIONAL_RET0 KVM_X86_OP
#include <asm/kvm-x86-ops.h>

+int kvm_x86_vendor_init_early(struct kvm_x86_init_ops *ops);
int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops);
void kvm_x86_vendor_exit(void);

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 9d29b2e7e855..de7aa4d1f0e6 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -5452,6 +5452,17 @@ static void __svm_exit(void)
kvm_x86_vendor_exit();
}

+#ifndef MODULE
+static int __init svm_ops_early_init(void)
+{
+ if (!kvm_is_svm_supported())
+ return -EOPNOTSUPP;
+
+ return kvm_x86_vendor_init_early(&svm_init_ops);
+}
+early_initcall(svm_ops_early_init);
+#endif
+
static int __init svm_init(void)
{
int r;
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 91b6f2f3edc2..569545854e16 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -8617,6 +8617,8 @@ __init int vmx_hardware_setup(void)
vt_x86_ops.set_hv_timer = NULL;
vt_x86_ops.cancel_hv_timer = NULL;
}
+ WARN_ON(enable_preemption_timer && vt_x86_ops.set_hv_timer == NULL);
+ WARN_ON(enable_preemption_timer && vt_x86_ops.cancel_hv_timer == NULL);

kvm_caps.supported_mce_cap |= MCG_LMCE_P;
kvm_caps.supported_mce_cap |= MCG_CMCI_P;
@@ -8698,6 +8700,22 @@ void vmx_exit(void)
kvm_x86_vendor_exit();
}

+#ifndef MODULE
+static int __init vmx_ops_early_init(void)
+{
+ if (!kvm_is_vmx_supported())
+ return -EOPNOTSUPP;
+
+ if (!enable_preemption_timer) {
+ vt_x86_ops.set_hv_timer = NULL;
+ vt_x86_ops.cancel_hv_timer = NULL;
+ }
+
+ return kvm_x86_vendor_init_early(&vt_init_ops);
+}
+early_initcall(vmx_ops_early_init);
+#endif
+
int __init vmx_init(void)
{
int r, cpu;
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index c9c2aa6f4705..dbd00a26538f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9971,6 +9971,22 @@ static void kvm_x86_check_cpu_compat(void *ret)
*(int *)ret = kvm_x86_check_processor_compatibility();
}

+int kvm_x86_vendor_init_early(struct kvm_x86_init_ops *ops)
+{
+ guard(mutex)(&vendor_module_lock);
+
+ if (kvm_x86_ops.enable_virtualization_cpu) {
+ pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
+ return -EEXIST;
+ }
+
+ kvm_ops_update(ops);
+ kvm_register_perf_callbacks(ops->handle_intel_pt_intr);
+
+ return 0;
+}
+EXPORT_SYMBOL_FOR_KVM_INTERNAL(kvm_x86_vendor_init_early);
+
int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
{
u64 host_pat;
@@ -9978,7 +9994,8 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)

guard(mutex)(&vendor_module_lock);

- if (kvm_x86_ops.enable_virtualization_cpu) {
+ if (kvm_x86_ops.enable_virtualization_cpu &&
+ kvm_x86_ops.enable_virtualization_cpu != ops->runtime_ops->enable_virtualization_cpu) {
pr_err("already loaded vendor module '%s'\n", kvm_x86_ops.name);
return -EEXIST;
}
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 2c35acc2722b..731975791895 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -7378,7 +7378,8 @@ DEFINE_STATIC_CALL_RET0(__perf_guest_handle_intel_pt_intr, *perf_guest_cbs->hand

void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
{
- if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs)))
+ if (WARN_ON_ONCE(rcu_access_pointer(perf_guest_cbs) &&
+ rcu_access_pointer(perf_guest_cbs) != cbs))
return;

rcu_assign_pointer(perf_guest_cbs, cbs);
@@ -7389,6 +7390,9 @@ void perf_register_guest_info_callbacks(struct perf_guest_info_callbacks *cbs)
if (cbs->handle_intel_pt_intr)
static_call_update(__perf_guest_handle_intel_pt_intr,
cbs->handle_intel_pt_intr);
+ else
+ static_call_update(__perf_guest_handle_intel_pt_intr,
+ (void *)__static_call_return0);
}
EXPORT_SYMBOL_GPL(perf_register_guest_info_callbacks);

--
2.47.3




Amazon Web Services Development Center Germany GmbH
Tamara-Danz-Str. 13
10243 Berlin
Geschaeftsfuehrung: Christof Hellmis, Andreas Stieger
Eingetragen am Amtsgericht Charlottenburg unter HRB 257764 B
Sitz: Berlin
Ust-ID: DE 365 538 597