[PATCH v2 4/6] KVM: SVM: Inject NMIs when restricted injection is active

From: Melody Wang
Date: Tue Sep 10 2024 - 02:05:56 EST


When restricted injection is active, only #HV exceptions can be injected into
the SEV-SNP guest.

Detect that the restricted injection feature is active for the guest, and then
follow the #HV doorbell communication protocol from the GHCB specification to
inject NMIs.
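
For reference, the resulting NMI handling can be summarized by the sketch
below. This is only a condensed view of the hunks that follow (the #HV
doorbell map/unmap helpers and error handling from the earlier patches in
this series are omitted), not additional code:

  /* svm_inject_nmi(): prefer the #HV doorbell when restricted
   * injection is active for the vCPU.
   */
  if (sev_snp_inject(INJECT_NMI, vcpu)) {
          /* NMI recorded in hvdb->events.nmi, #HV exception queued */
          ++vcpu->stat.nmi_injections;
          return;
  }
  /* Otherwise fall back to the regular EVENTINJ mechanism */
  svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

  /* svm_nmi_blocked(): with restricted injection, a new NMI is blocked
   * until the guest has acknowledged the previously delivered one.
   */
  if (sev_snp_is_rinj_active(vcpu))
          return sev_snp_blocked(INJECT_NMI, vcpu);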

Co-developed-by: Thomas Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Thomas Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Melody Wang <huibo.wang@xxxxxxx>
---
arch/x86/kvm/svm/sev.c | 19 ++++++++++++++++---
arch/x86/kvm/svm/svm.c | 8 ++++++++
arch/x86/kvm/svm/svm.h | 1 +
3 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index f7623fa64307..9102f7e39c52 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -5070,7 +5070,10 @@ static bool __sev_snp_inject(enum inject_type type, struct kvm_vcpu *vcpu)
if (!hvdb)
return false;

- hvdb->events.vector = vcpu->arch.interrupt.nr;
+ if (type == INJECT_NMI)
+ hvdb->events.nmi = 1;
+ else
+ hvdb->events.vector = vcpu->arch.interrupt.nr;

prepare_hv_injection(svm, hvdb);

@@ -5148,10 +5151,17 @@ void sev_snp_cancel_injection(struct kvm_vcpu *vcpu)
/* Copy info back into event_inj field (replaces #HV) */
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID;

+ /*
+ * KVM only injects a single event each time (see prepare_hv_injection()),
+ * so when events.nmi is set, events.vector will be zero.
+ */
if (hvdb->events.vector)
svm->vmcb->control.event_inj |= hvdb->events.vector |
SVM_EVTINJ_TYPE_INTR;

+ if (hvdb->events.nmi)
+ svm->vmcb->control.event_inj |= SVM_EVTINJ_TYPE_NMI;
+
hvdb->events.pending_events = 0;

out:
@@ -5169,8 +5179,11 @@ bool sev_snp_blocked(enum inject_type type, struct kvm_vcpu *vcpu)
if (!hvdb)
return true;

- /* Indicate interrupts blocked based on guest acknowledgment */
- blocked = !!hvdb->events.vector;
+ /* Indicate NMIs and interrupts blocked based on guest acknowledgment */
+ if (type == INJECT_NMI)
+ blocked = hvdb->events.nmi;
+ else
+ blocked = !!hvdb->events.vector;

unmap_hvdb(vcpu, &hvdb_map);

diff --git a/arch/x86/kvm/svm/svm.c b/arch/x86/kvm/svm/svm.c
index a48388d99c97..d9c572344f0c 100644
--- a/arch/x86/kvm/svm/svm.c
+++ b/arch/x86/kvm/svm/svm.c
@@ -3602,6 +3602,9 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
struct vcpu_svm *svm = to_svm(vcpu);

+ if (sev_snp_inject(INJECT_NMI, vcpu))
+ goto status;
+
svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;

if (svm->nmi_l1_to_l2)
@@ -3616,6 +3619,8 @@ static void svm_inject_nmi(struct kvm_vcpu *vcpu)
svm->nmi_masked = true;
svm_set_iret_intercept(svm);
}
+
+status:
++vcpu->stat.nmi_injections;
}

@@ -3786,6 +3791,9 @@ bool svm_nmi_blocked(struct kvm_vcpu *vcpu)
if (!gif_set(svm))
return true;

+ if (sev_snp_is_rinj_active(vcpu))
+ return sev_snp_blocked(INJECT_NMI, vcpu);
+
if (is_guest_mode(vcpu) && nested_exit_on_nmi(svm))
return false;

diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index 95c0a7070bd1..f60ff6229ff4 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -43,6 +43,7 @@ extern int lbrv;

enum inject_type {
INJECT_IRQ,
+ INJECT_NMI,
};

/*
--
2.34.1