[PATCH v6 65/76] x86/paravirt: Allow hypervisor specific VMMCALL handling under SEV-ES
From: Joerg Roedel
Date: Mon Aug 24 2020 - 05:03:14 EST
From: Joerg Roedel <jroedel@xxxxxxx>
Add two new paravirt callbacks to provide hypervisor-specific processor
state in the GHCB and to copy state from the hypervisor back to the
processor.
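For illustration, a hypervisor-specific backend implements the two callbacks
and hooks them into x86_platform.hyper. The snippet below is only a minimal
sketch and not part of this patch: the kvm_sev_es_hcall_*() names and the
exact set of argument registers copied are assumptions for the example (the
actual KVM wiring comes in a later patch of this series); the ghcb_set_*()
accessors are the ones already provided by the SEV-ES support code.

static void kvm_sev_es_hcall_prepare(struct ghcb *ghcb,
				     struct pt_regs *regs)
{
	/*
	 * RAX and the CPL are already loaded into the GHCB by the
	 * common #VC VMMCALL handler; copy the remaining hypercall
	 * argument registers so the hypervisor can see them.
	 */
	ghcb_set_rbx(ghcb, regs->bx);
	ghcb_set_rcx(ghcb, regs->cx);
	ghcb_set_rdx(ghcb, regs->dx);
	ghcb_set_rsi(ghcb, regs->si);
}

static bool kvm_sev_es_hcall_finish(struct ghcb *ghcb,
				    struct pt_regs *regs)
{
	/*
	 * Nothing to copy back besides RAX (handled by the common
	 * code) and no additional state to verify in this example.
	 */
	return true;
}

/* Registration, e.g. from the guest's platform init code: */
x86_platform.hyper.sev_es_hcall_prepare = kvm_sev_es_hcall_prepare;
x86_platform.hyper.sev_es_hcall_finish  = kvm_sev_es_hcall_finish;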
Signed-off-by: Joerg Roedel <jroedel@xxxxxxx>
Link: https://lore.kernel.org/r/20200724160336.5435-65-joro@xxxxxxxxxx
---
arch/x86/include/asm/x86_init.h | 16 +++++++++++++++-
arch/x86/kernel/sev-es.c | 12 ++++++++++++
2 files changed, 27 insertions(+), 1 deletion(-)
diff --git a/arch/x86/include/asm/x86_init.h b/arch/x86/include/asm/x86_init.h
index 6807153c0410..0304e2931cd3 100644
--- a/arch/x86/include/asm/x86_init.h
+++ b/arch/x86/include/asm/x86_init.h
@@ -4,8 +4,10 @@
#include <asm/bootparam.h>
+struct ghcb;
struct mpc_bus;
struct mpc_cpu;
+struct pt_regs;
struct mpc_table;
struct cpuinfo_x86;
@@ -236,10 +238,22 @@ struct x86_legacy_features {
/**
* struct x86_hyper_runtime - x86 hypervisor specific runtime callbacks
*
- * @pin_vcpu: pin current vcpu to specified physical cpu (run rarely)
+ * @pin_vcpu: pin current vcpu to specified physical
+ * cpu (run rarely)
+ * @sev_es_hcall_prepare: Load additional hypervisor-specific
+ * state into the GHCB when doing a VMMCALL under
+ * SEV-ES. Called from the #VC exception handler.
+ * @sev_es_hcall_finish: Copies state from the GHCB back into the
+ * processor (or pt_regs). Also runs checks on the
+ * state returned from the hypervisor after a
+ * VMMCALL under SEV-ES. Needs to return 'false'
+ * if the checks fail. Called from the #VC
+ * exception handler.
*/
struct x86_hyper_runtime {
void (*pin_vcpu)(int cpu);
+ void (*sev_es_hcall_prepare)(struct ghcb *ghcb, struct pt_regs *regs);
+ bool (*sev_es_hcall_finish)(struct ghcb *ghcb, struct pt_regs *regs);
};
/**
diff --git a/arch/x86/kernel/sev-es.c b/arch/x86/kernel/sev-es.c
index e1f3ebbcc122..28fe95ecd508 100644
--- a/arch/x86/kernel/sev-es.c
+++ b/arch/x86/kernel/sev-es.c
@@ -897,6 +897,9 @@ static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
ghcb_set_rax(ghcb, ctxt->regs->ax);
ghcb_set_cpl(ghcb, user_mode(ctxt->regs) ? 3 : 0);
+ if (x86_platform.hyper.sev_es_hcall_prepare)
+ x86_platform.hyper.sev_es_hcall_prepare(ghcb, ctxt->regs);
+
ret = sev_es_ghcb_hv_call(ghcb, ctxt, SVM_EXIT_VMMCALL, 0, 0);
if (ret != ES_OK)
return ret;
@@ -906,6 +909,15 @@ static enum es_result vc_handle_vmmcall(struct ghcb *ghcb,
ctxt->regs->ax = ghcb->save.rax;
+ /*
+ * Call sev_es_hcall_finish() after regs->ax is already set.
+ * This allows the hypervisor handler to overwrite it again if
+ * necessary.
+ */
+ if (x86_platform.hyper.sev_es_hcall_finish &&
+ !x86_platform.hyper.sev_es_hcall_finish(ghcb, ctxt->regs))
+ return ES_VMM_ERROR;
+
return ES_OK;
}
--
2.28.0