[PATCH 2/4] KVM: x86: Register emergency virt callback in common code, via kvm_x86_ops

From: Sean Christopherson
Date: Thu Apr 25 2024 - 19:40:37 EST


Move the registration of KVM's "disable virtualization in an emergency"
callbacks into common KVM code, using a pointer in kvm_x86_ops to provide
each vendor's callback.

There is no reason to force vendor code to do the registration, and the
callback should be installed when kvm_x86_ops itself is ready, i.e. when
it's possible for .hardware_enable() to be invoked. E.g. TDX needs to do
VMXON during module initialization, so registering the callback as part of
early setup means one less thing to mess up.
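
For reference, the "emergency virt callback" plumbing this patch hooks into
is already declared in arch/x86/include/asm/reboot.h; paraphrasing the
relevant declarations (ifdeffery trimmed):

  /* Invoked on the emergency reboot/crash path to force virt off. */
  typedef void (cpu_emergency_virt_cb)(void);

  void cpu_emergency_register_virt_callback(cpu_emergency_virt_cb *callback);
  void cpu_emergency_unregister_virt_callback(cpu_emergency_virt_cb *callback);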

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/include/asm/kvm_host.h | 3 +++
 arch/x86/kvm/svm/svm.c          | 5 +----
 arch/x86/kvm/vmx/main.c         | 2 ++
 arch/x86/kvm/vmx/vmx.c          | 6 +-----
 arch/x86/kvm/vmx/x86_ops.h      | 1 +
 arch/x86/kvm/x86.c              | 5 +++++
 6 files changed, 13 insertions(+), 9 deletions(-)
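
To make the shape of the change concrete, here is a standalone userspace
sketch of the pattern, illustrative only: x86_ops, register_emergency_cb(),
and the vendor_* names are stand-ins for kvm_x86_ops,
cpu_emergency_register_virt_callback(), and the SVM/VMX implementations.

/* Standalone sketch, not kernel code: common code owns registration. */
#include <stdio.h>

typedef void (emergency_cb)(void);	/* stand-in for cpu_emergency_virt_cb */

struct x86_ops {			/* stand-in for struct kvm_x86_ops */
	int  (*hardware_enable)(void);
	void (*hardware_disable)(void);
	emergency_cb *emergency_disable;	/* new: filled in by the vendor */
};

static emergency_cb *registered_cb;	/* the global emergency callback */

static void register_emergency_cb(emergency_cb *cb)
{
	registered_cb = cb;
}

static void unregister_emergency_cb(emergency_cb *cb)
{
	if (registered_cb == cb)
		registered_cb = NULL;
}

/* What a vendor module (svm.c/vmx.c) provides. */
static int  vendor_enable(void)    { return 0; }
static void vendor_disable(void)   { }
static void vendor_emergency(void) { puts("emergency: virt disabled"); }

static struct x86_ops vendor_ops = {
	.hardware_enable   = vendor_enable,
	.hardware_disable  = vendor_disable,
	.emergency_disable = vendor_emergency,
};

/* Common init/exit: registration now lives here, once, for all vendors. */
static int common_vendor_init(struct x86_ops *ops)
{
	register_emergency_cb(ops->emergency_disable);
	return ops->hardware_enable();
}

static void common_vendor_exit(struct x86_ops *ops)
{
	ops->hardware_disable();
	unregister_emergency_cb(ops->emergency_disable);
}

int main(void)
{
	common_vendor_init(&vendor_ops);
	if (registered_cb)
		registered_cb();	/* simulate the emergency reboot path */
	common_vendor_exit(&vendor_ops);
	return 0;
}

The point of the patch is the last two functions above: registration keys
off the ops pointer, so each vendor only fills in one field and can never
forget (or mis-order) the register/unregister calls.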

diff --git a/arch/x86/include/asm/kvm_host.h b/arch/x86/include/asm/kvm_host.h
index 1d13e3cd1dc5..d64a51da150c 100644
--- a/arch/x86/include/asm/kvm_host.h
+++ b/arch/x86/include/asm/kvm_host.h
@@ -36,6 +36,7 @@
 #include <asm/kvm_page_track.h>
 #include <asm/kvm_vcpu_regs.h>
 #include <asm/hyperv-tlfs.h>
+#include <asm/reboot.h>
 
 #define __KVM_HAVE_ARCH_VCPU_DEBUGFS
 
@@ -1606,6 +1607,8 @@ struct kvm_x86_ops {
 
 	int (*hardware_enable)(void);
 	void (*hardware_disable)(void);
+	cpu_emergency_virt_cb *emergency_disable;
+
 	void (*hardware_unsetup)(void);
 	bool (*has_emulated_msr)(struct kvm *kvm, u32 index);
 	void (*vcpu_after_set_cpuid)(struct kvm_vcpu *vcpu);
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0f3b59da0d4a..3b54243d0c22 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -4919,6 +4919,7 @@ static void *svm_alloc_apic_backing_page(struct kvm_vcpu *vcpu)
 static struct kvm_x86_ops svm_x86_ops __initdata = {
 	.name = KBUILD_MODNAME,
 
+	.emergency_disable = svm_emergency_disable,
 	.check_processor_compatibility = svm_check_processor_compat,
 
 	.hardware_unsetup = svm_hardware_unsetup,
@@ -5352,8 +5353,6 @@ static struct kvm_x86_init_ops svm_init_ops __initdata = {
 static void __svm_exit(void)
 {
 	kvm_x86_vendor_exit();
-
-	cpu_emergency_unregister_virt_callback(svm_emergency_disable);
 }
 
 static int __init svm_init(void)
@@ -5369,8 +5368,6 @@ static int __init svm_init(void)
 	if (r)
 		return r;
 
-	cpu_emergency_register_virt_callback(svm_emergency_disable);
-
 	/*
	  * Common KVM initialization _must_ come last, after this, /dev/kvm is
	  * exposed to userspace!
diff --git a/arch/x86/kvm/vmx/main.c b/arch/x86/kvm/vmx/main.c
index 7c546ad3e4c9..3f423afc263b 100644
--- a/arch/x86/kvm/vmx/main.c
+++ b/arch/x86/kvm/vmx/main.c
@@ -24,6 +24,8 @@ struct kvm_x86_ops vt_x86_ops __initdata = {
 
 	.hardware_enable = vmx_hardware_enable,
 	.hardware_disable = vmx_hardware_disable,
+	.emergency_disable = vmx_emergency_disable,
+
 	.has_emulated_msr = vmx_has_emulated_msr,
 
 	.vm_size = sizeof(struct kvm_vmx),
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index f10b5f8f364b..19bc62b60fac 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -753,7 +753,7 @@ static int kvm_cpu_vmxoff(void)
 	return -EIO;
 }
 
-static void vmx_emergency_disable(void)
+void vmx_emergency_disable(void)
 {
 	int cpu = raw_smp_processor_id();
 	struct loaded_vmcs *v;
@@ -8562,8 +8562,6 @@ static void __vmx_exit(void)
 {
 	allow_smaller_maxphyaddr = false;
 
-	cpu_emergency_unregister_virt_callback(vmx_emergency_disable);
-
 	vmx_cleanup_l1d_flush();
 }
 
@@ -8610,8 +8608,6 @@ static int __init vmx_init(void)
 		pi_init_cpu(cpu);
 	}
 
-	cpu_emergency_register_virt_callback(vmx_emergency_disable);
-
 	vmx_check_vmcs12_offsets();
 
 	/*
diff --git a/arch/x86/kvm/vmx/x86_ops.h b/arch/x86/kvm/vmx/x86_ops.h
index 502704596c83..afddfe3747dd 100644
--- a/arch/x86/kvm/vmx/x86_ops.h
+++ b/arch/x86/kvm/vmx/x86_ops.h
@@ -15,6 +15,7 @@ void vmx_hardware_unsetup(void);
 int vmx_check_processor_compat(void);
 int vmx_hardware_enable(void);
 void vmx_hardware_disable(void);
+void vmx_emergency_disable(void);
 int vmx_vm_init(struct kvm *kvm);
 void vmx_vm_destroy(struct kvm *kvm);
 int vmx_vcpu_precreate(struct kvm *kvm);
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index e9ef1fa4b90b..12e88aa2cca2 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -9797,6 +9797,8 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 
 	kvm_ops_update(ops);
 
+	cpu_emergency_register_virt_callback(kvm_x86_ops.emergency_disable);
+
 	for_each_online_cpu(cpu) {
 		smp_call_function_single(cpu, kvm_x86_check_cpu_compat, &r, 1);
 		if (r < 0)
@@ -9847,6 +9849,7 @@ int kvm_x86_vendor_init(struct kvm_x86_init_ops *ops)
 	return 0;
 
 out_unwind_ops:
+	cpu_emergency_unregister_virt_callback(kvm_x86_ops.emergency_disable);
 	kvm_x86_ops.hardware_enable = NULL;
 	static_call(kvm_x86_hardware_unsetup)();
 out_mmu_exit:
@@ -9887,6 +9890,8 @@ void kvm_x86_vendor_exit(void)
 	static_key_deferred_flush(&kvm_xen_enabled);
 	WARN_ON(static_branch_unlikely(&kvm_xen_enabled.key));
 #endif
+	cpu_emergency_unregister_virt_callback(kvm_x86_ops.emergency_disable);
+
 	mutex_lock(&vendor_module_lock);
 	kvm_x86_ops.hardware_enable = NULL;
 	mutex_unlock(&vendor_module_lock);
--
2.44.0.769.g3c40516874-goog