[PATCH 2/2] KVM: SVM: Add support to handle AP reset MSR protocol

From: Sean Christopherson
Date: Wed Oct 20 2021 - 08:44:14 EST


From: Tom Lendacky <thomas.lendacky@xxxxxxx>

Add support for AP Reset Hold being invoked using the GHCB MSR protocol,
available in version 2 of the GHCB specification.
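
For background, the MSR protocol runs the entire handshake through the
GHCB MSR (0xC001_0130): the guest writes the AP Reset Hold request
(info value 0x006) and issues VMGEXIT, and after being woken it expects
an AP Reset Hold response (0x007) whose data field (bits 63:12) is
non-zero only if a SIPI was actually delivered. A rough guest-side
sketch of the flow this patch services (the sev_es_{wr,rd}_ghcb_msr()
helpers are illustrative, modeled on the guest GHCB accessors; the
constants match the values used by this patch):

  #define GHCB_MSR_INFO_MASK		0xfffULL	/* bits 11:0 */
  #define GHCB_DATA_LOW			12
  #define GHCB_MSR_AP_RESET_HOLD_REQ	0x006
  #define GHCB_MSR_AP_RESET_HOLD_RESP	0x007

  static bool ap_reset_hold_msr_protocol(void)
  {
  	u64 val;

  	/* Request the hold; the VMGEXIT is routed to KVM's MSR protocol handler. */
  	sev_es_wr_ghcb_msr(GHCB_MSR_AP_RESET_HOLD_REQ);
  	VMGEXIT();

  	/*
  	 * KVM presets the response data to 0 and flips it to non-zero only
  	 * when delivering a SIPI, so a non-zero data field means the vCPU
  	 * was legitimately released from the hold.
  	 */
  	val = sev_es_rd_ghcb_msr();
  	if ((val & GHCB_MSR_INFO_MASK) != GHCB_MSR_AP_RESET_HOLD_RESP)
  		return false;

  	return !!(val >> GHCB_DATA_LOW);
  }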

Signed-off-by: Tom Lendacky <thomas.lendacky@xxxxxxx>
Signed-off-by: Brijesh Singh <brijesh.singh@xxxxxxx>
Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
Co-developed-by: Sean Christopherson <seanjc@xxxxxxxxxx>
Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
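Note for reviewers: the smp_wmb()/smp_rmb() pair exists because the
vCPU entering the hold and the vCPU delivering the INIT-SIPI race on
different CPUs. The holding vCPU must publish ghcb_gpa and
reset_hold_msr_protocol before its mp_state changes (inside
__kvm_vcpu_halt()), while sev_es_vcpu_reset() reads mp_state first and
only then consumes them. Sketched with the names used in the patch
(illustration only, not additional code):

  vCPU entering the hold                  vCPU handling INIT-SIPI
  ----------------------                  -----------------------
  ghcb_gpa = ghcb_msr_ap_rst_resp(0);
  reset_hold_msr_protocol = true;
  smp_wmb();
  mp_state = AP_RESET_HOLD;               if (mp_state == AP_RESET_HOLD) {
                                              smp_rmb();
                                              if (reset_hold_msr_protocol)
                                                  ghcb_gpa = ghcb_msr_ap_rst_resp(1);
                                          }
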
 arch/x86/kvm/svm/sev.c | 52 +++++++++++++++++++++++++++++++++++++-----
 arch/x86/kvm/svm/svm.h |  1 +
 2 files changed, 47 insertions(+), 6 deletions(-)

diff --git a/arch/x86/kvm/svm/sev.c b/arch/x86/kvm/svm/sev.c
index f8dfa88993b8..1174270d18ee 100644
--- a/arch/x86/kvm/svm/sev.c
+++ b/arch/x86/kvm/svm/sev.c
@@ -2405,10 +2405,35 @@ static u64 ghcb_msr_version_info(void)
 	return msr;
 }
 
-static int sev_emulate_ap_reset_hold(struct vcpu_svm *svm)
+static u64 ghcb_msr_ap_rst_resp(u64 value)
+{
+	return (u64)GHCB_MSR_AP_RESET_HOLD_RESP | (value << GHCB_DATA_LOW);
+}
+
+static int sev_emulate_ap_reset_hold(struct vcpu_svm *svm, u64 hold_type)
 {
 	int ret = kvm_skip_emulated_instruction(&svm->vcpu);
 
+	if (hold_type == GHCB_MSR_AP_RESET_HOLD_REQ) {
+		/*
+		 * Preset the result to a non-SIPI return and then only set
+		 * the result to non-zero when delivering a SIPI.
+		 */
+		svm->vmcb->control.ghcb_gpa = ghcb_msr_ap_rst_resp(0);
+		svm->reset_hold_msr_protocol = true;
+	} else {
+		WARN_ON_ONCE(hold_type != SVM_VMGEXIT_AP_HLT_LOOP);
+		svm->reset_hold_msr_protocol = false;
+	}
+
+	/*
+	 * Ensure the writes to ghcb_gpa and reset_hold_msr_protocol are
+	 * visible before the MP state change so that the INIT-SIPI path
+	 * doesn't misread reset_hold_msr_protocol or clobber ghcb_gpa
+	 * first.  Pairs with the smp_rmb() in sev_es_vcpu_reset().
+	 */
+	smp_wmb();
+
 	return __kvm_vcpu_halt(&svm->vcpu,
 			       KVM_MP_STATE_AP_RESET_HOLD, KVM_EXIT_AP_RESET_HOLD) && ret;
 }
@@ -2459,6 +2484,9 @@ static int sev_handle_vmgexit_msr_protocol(struct vcpu_svm *svm)
 
 		break;
 	}
+	case GHCB_MSR_AP_RESET_HOLD_REQ:
+		ret = sev_emulate_ap_reset_hold(svm, GHCB_MSR_AP_RESET_HOLD_REQ);
+		break;
 	case GHCB_MSR_TERM_REQ: {
 		u64 reason_set, reason_code;
 
@@ -2544,7 +2572,7 @@ int sev_handle_vmgexit(struct kvm_vcpu *vcpu)
 		ret = svm_invoke_exit_handler(vcpu, SVM_EXIT_IRET);
 		break;
 	case SVM_VMGEXIT_AP_HLT_LOOP:
-		ret = sev_emulate_ap_reset_hold(svm);
+		ret = sev_emulate_ap_reset_hold(svm, SVM_VMGEXIT_AP_HLT_LOOP);
 		break;
 	case SVM_VMGEXIT_AP_JUMP_TABLE: {
 		struct kvm_sev_info *sev = &to_kvm_svm(vcpu->kvm)->sev_info;
@@ -2642,11 +2670,23 @@ void sev_es_vcpu_reset(struct vcpu_svm *svm, bool init_event)
 	if (init_event) {
 		/*
 		 * If the vCPU is in a "reset" hold, signal via SW_EXIT_INFO_2
-		 * that, assuming it receives a SIPI, the vCPU was "released".
+		 * (or the GHCB GPA for the MSR protocol) that, assuming it
+		 * receives a SIPI, the vCPU was "released".
 		 */
-		if (svm->vcpu.arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD &&
-		    svm->ghcb)
-			ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+		if (svm->vcpu.arch.mp_state == KVM_MP_STATE_AP_RESET_HOLD) {
+			/*
+			 * Ensure mp_state is read before reset_hold_msr_protocol
+			 * and before ghcb_gpa is written so that KVM consumes
+			 * the correct protocol.  Pairs with the smp_wmb() in
+			 * sev_emulate_ap_reset_hold().
+			 */
+			smp_rmb();
+			if (svm->reset_hold_msr_protocol)
+				svm->vmcb->control.ghcb_gpa = ghcb_msr_ap_rst_resp(1);
+			else if (svm->ghcb)
+				ghcb_set_sw_exit_info_2(svm->ghcb, 1);
+			svm->reset_hold_msr_protocol = false;
+		}
 		return;
 	}
 
diff --git a/arch/x86/kvm/svm/svm.h b/arch/x86/kvm/svm/svm.h
index c1f3685db2e1..531d3258df58 100644
--- a/arch/x86/kvm/svm/svm.h
+++ b/arch/x86/kvm/svm/svm.h
@@ -198,6 +198,7 @@ struct vcpu_svm {
 	bool ghcb_sa_free;
 
 	bool guest_state_loaded;
+	bool reset_hold_msr_protocol;
 };
 
 struct svm_cpu_data {
--
2.33.0.1079.g6e70778dc9-goog

