[PATCH v3 07/16] KVM: SVM: Move core EFER.SVME enablement to kernel
From: Sean Christopherson
Date: Fri Feb 13 2026 - 20:30:29 EST
Move the innermost EFER.SVME logic out of KVM and into core x86 to land
the SVM support alongside VMX support. This will allow providing a more
unified API from the kernel to KVM, and will allow moving the bulk of the
emergency disabling insanity out of KVM without having a weird split
between kernel and KVM for SVM vs. VMX.
No functional change intended.
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
arch/x86/include/asm/virt.h | 6 +++++
arch/x86/kvm/svm/svm.c | 33 +++++------------------
arch/x86/virt/hw.c | 53 +++++++++++++++++++++++++++++++++++++
3 files changed, 65 insertions(+), 27 deletions(-)
diff --git a/arch/x86/include/asm/virt.h b/arch/x86/include/asm/virt.h
index cca0210a5c16..9a0753eaa20c 100644
--- a/arch/x86/include/asm/virt.h
+++ b/arch/x86/include/asm/virt.h
@@ -15,6 +15,12 @@ int x86_vmx_disable_virtualization_cpu(void);
void x86_vmx_emergency_disable_virtualization_cpu(void);
#endif
+#if IS_ENABLED(CONFIG_KVM_AMD)
+int x86_svm_enable_virtualization_cpu(void);
+int x86_svm_disable_virtualization_cpu(void);
+void x86_svm_emergency_disable_virtualization_cpu(void);
+#endif
+
#else
static __always_inline void x86_virt_init(void) {}
#endif
diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index 0ae66c770ebc..5f033bf3ba83 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -478,27 +478,9 @@ static __always_inline struct sev_es_save_area *sev_es_host_save_area(struct svm
return &sd->save_area->host_sev_es_save;
}
-static inline void kvm_cpu_svm_disable(void)
-{
- uint64_t efer;
-
- wrmsrq(MSR_VM_HSAVE_PA, 0);
- rdmsrq(MSR_EFER, efer);
- if (efer & EFER_SVME) {
- /*
- * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
- * NMI aren't blocked.
- */
- stgi();
- wrmsrq(MSR_EFER, efer & ~EFER_SVME);
- }
-}
-
static void svm_emergency_disable_virtualization_cpu(void)
{
- virt_rebooting = true;
-
- kvm_cpu_svm_disable();
+ wrmsrq(MSR_VM_HSAVE_PA, 0);
}
static void svm_disable_virtualization_cpu(void)
@@ -507,7 +489,7 @@ static void svm_disable_virtualization_cpu(void)
if (tsc_scaling)
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
- kvm_cpu_svm_disable();
+ x86_svm_disable_virtualization_cpu();
amd_pmu_disable_virt();
}
@@ -516,12 +498,12 @@ static int svm_enable_virtualization_cpu(void)
{
struct svm_cpu_data *sd;
- uint64_t efer;
int me = raw_smp_processor_id();
+ int r;
- rdmsrq(MSR_EFER, efer);
- if (efer & EFER_SVME)
- return -EBUSY;
+ r = x86_svm_enable_virtualization_cpu();
+ if (r)
+ return r;
sd = per_cpu_ptr(&svm_data, me);
sd->asid_generation = 1;
@@ -529,8 +511,6 @@ static int svm_enable_virtualization_cpu(void)
sd->next_asid = sd->max_asid + 1;
sd->min_asid = max_sev_asid + 1;
- wrmsrq(MSR_EFER, efer | EFER_SVME);
-
wrmsrq(MSR_VM_HSAVE_PA, sd->save_area_pa);
if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
@@ -541,7 +521,6 @@ static int svm_enable_virtualization_cpu(void)
__svm_write_tsc_multiplier(SVM_TSC_RATIO_DEFAULT);
}
-
/*
* Get OSVW bits.
*
diff --git a/arch/x86/virt/hw.c b/arch/x86/virt/hw.c
index dc426c2bc24a..014e9dfab805 100644
--- a/arch/x86/virt/hw.c
+++ b/arch/x86/virt/hw.c
@@ -163,6 +163,59 @@ static __init int x86_vmx_init(void)
static __init int x86_vmx_init(void) { return -EOPNOTSUPP; }
#endif
+#if IS_ENABLED(CONFIG_KVM_AMD)
+int x86_svm_enable_virtualization_cpu(void)
+{
+ u64 efer;
+
+ if (!cpu_feature_enabled(X86_FEATURE_SVM))
+ return -EOPNOTSUPP;
+
+ rdmsrq(MSR_EFER, efer);
+ if (efer & EFER_SVME)
+ return -EBUSY;
+
+ wrmsrq(MSR_EFER, efer | EFER_SVME);
+ return 0;
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_enable_virtualization_cpu);
+
+int x86_svm_disable_virtualization_cpu(void)
+{
+ int r = -EIO;
+ u64 efer;
+
+ /*
+ * Force GIF=1 prior to disabling SVM, e.g. to ensure INIT and
+ * NMI aren't blocked.
+ */
+ asm goto("1: stgi\n\t"
+ _ASM_EXTABLE(1b, %l[fault])
+ ::: "memory" : fault);
+ r = 0;
+
+fault:
+ rdmsrq(MSR_EFER, efer);
+ wrmsrq(MSR_EFER, efer & ~EFER_SVME);
+ return r;
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_disable_virtualization_cpu);
+
+void x86_svm_emergency_disable_virtualization_cpu(void)
+{
+ u64 efer;
+
+ virt_rebooting = true;
+
+ rdmsrq(MSR_EFER, efer);
+ if (!(efer & EFER_SVME))
+ return;
+
+ x86_svm_disable_virtualization_cpu();
+}
+EXPORT_SYMBOL_FOR_KVM(x86_svm_emergency_disable_virtualization_cpu);
+#endif
+
void __init x86_virt_init(void)
{
x86_vmx_init();
--
2.53.0.310.g728cabbaf7-goog