[PATCH v2 15/17] KVM: VMX: Add a helper for NMI handling

From: Binbin Wu
Date: Mon Feb 10 2025 - 22:01:06 EST


From: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>

Add a helper to handle the NMI exit.

TDX handles the NMI exit the same way as the VMX case. Add a helper so the
code can be shared with TDX, and expose the helper in common.h.

No functional change intended.

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
Co-developed-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
Signed-off-by: Binbin Wu <binbin.wu@xxxxxxxxxxxxxxx>
---
TDX interrupts v2:
- Renamed from "KVM: VMX: Move NMI/exception handler to common helper".
- Revert the unnecessary move, because a later patch will have TDX reuse
  vmx_handle_exit_irqoff() as its handle_exit_irqoff() callback.
- Add the check for NMI to __vmx_handle_nmi() and rename it to vmx_handle_nmi().
- Update the changelog accordingly.

TDX interrupts v1:
- Update the change log with suggestions. (Binbin)
- Move the NMI handling code to the common header and add a helper
  __vmx_handle_nmi() for it. (Binbin)
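
Illustrative note (not part of this patch): a rough sketch of how a TDX-side
caller could reuse the shared helper after returning from the TD, while still
in the noinstr/irqoff section.  The function name and surrounding flow below
are hypothetical; the actual TDX call site is introduced by a later patch in
this series.

  /* Hypothetical TDX-side caller, shown for illustration only. */
  static noinstr void tdx_example_vcpu_enter_exit(struct kvm_vcpu *vcpu)
  {
          /* ... SEAMCALL to enter the TD and record the exit reason ... */

          /*
           * Forward any NMI that arrived while the guest was running to the
           * host NMI handler, exactly as the VMX exit path does.
           */
          vmx_handle_nmi(vcpu);
  }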
---
 arch/x86/kvm/vmx/common.h |  2 ++
 arch/x86/kvm/vmx/vmx.c    | 24 +++++++++++++++---------
 2 files changed, 17 insertions(+), 9 deletions(-)

diff --git a/arch/x86/kvm/vmx/common.h b/arch/x86/kvm/vmx/common.h
index f26f7b1acbca..67b16bd8a788 100644
--- a/arch/x86/kvm/vmx/common.h
+++ b/arch/x86/kvm/vmx/common.h
@@ -180,4 +180,6 @@ static inline void __vmx_deliver_posted_interrupt(struct kvm_vcpu *vcpu,
         kvm_vcpu_trigger_posted_interrupt(vcpu, POSTED_INTR_VECTOR);
 }
 
+noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu);
+
 #endif /* __KVM_X86_VMX_COMMON_H */
diff --git a/arch/x86/kvm/vmx/vmx.c b/arch/x86/kvm/vmx/vmx.c
index 012649688e46..228a7e51b6a5 100644
--- a/arch/x86/kvm/vmx/vmx.c
+++ b/arch/x86/kvm/vmx/vmx.c
@@ -7212,6 +7212,20 @@ static fastpath_t vmx_exit_handlers_fastpath(struct kvm_vcpu *vcpu,
         }
 }
 
+noinstr void vmx_handle_nmi(struct kvm_vcpu *vcpu)
+{
+        if ((u16)vmx_get_exit_reason(vcpu).basic != EXIT_REASON_EXCEPTION_NMI ||
+            !is_nmi(vmx_get_intr_info(vcpu)))
+                return;
+
+        kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
+        if (cpu_feature_enabled(X86_FEATURE_FRED))
+                fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
+        else
+                vmx_do_nmi_irqoff();
+        kvm_after_interrupt(vcpu);
+}
+
 static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
                                         unsigned int flags)
 {
@@ -7255,15 +7269,7 @@ static noinstr void vmx_vcpu_enter_exit(struct kvm_vcpu *vcpu,
         if (likely(!vmx_get_exit_reason(vcpu).failed_vmentry))
                 vmx->idt_vectoring_info = vmcs_read32(IDT_VECTORING_INFO_FIELD);
 
-        if ((u16)vmx_get_exit_reason(vcpu).basic == EXIT_REASON_EXCEPTION_NMI &&
-            is_nmi(vmx_get_intr_info(vcpu))) {
-                kvm_before_interrupt(vcpu, KVM_HANDLING_NMI);
-                if (cpu_feature_enabled(X86_FEATURE_FRED))
-                        fred_entry_from_kvm(EVENT_TYPE_NMI, NMI_VECTOR);
-                else
-                        vmx_do_nmi_irqoff();
-                kvm_after_interrupt(vcpu);
-        }
+        vmx_handle_nmi(vcpu);
 
 out:
         guest_state_exit_irqoff();
--
2.46.0