Entering a realm is done using an SMC call to the RMM. On exit, the
exit codes need to be handled slightly differently from the normal
KVM path, so define our own functions for realm enter/exit and hook
them in if the guest is a realm guest.
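
The hook-in point in the run loop then looks roughly like the
following (an illustrative sketch, not the verbatim arm.c hunk;
vcpu_is_rec(), kvm_rec_enter() and handle_rme_exit() are the names
used by this series):

	if (vcpu_is_rec(vcpu))
		ret = kvm_rec_enter(vcpu);
	else
		ret = kvm_arm_vcpu_enter_exit(vcpu);

	...

	if (vcpu_is_rec(vcpu))
		ret = handle_rme_exit(vcpu, ret);
	else
		ret = handle_exit(vcpu, ret);
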
Signed-off-by: Steven Price <steven.price@xxxxxxx>
---
arch/arm64/include/asm/kvm_rme.h | 3 +
arch/arm64/kvm/Makefile | 2 +-
arch/arm64/kvm/arm.c | 19 +++-
arch/arm64/kvm/rme-exit.c | 180 +++++++++++++++++++++++++++++++
arch/arm64/kvm/rme.c | 11 ++
5 files changed, 209 insertions(+), 6 deletions(-)
create mode 100644 arch/arm64/kvm/rme-exit.c
diff --git a/arch/arm64/kvm/rme-exit.c b/arch/arm64/kvm/rme-exit.c
new file mode 100644
index 000000000000..5bf58c9b42b7
--- /dev/null
+++ b/arch/arm64/kvm/rme-exit.c
@@ -0,0 +1,180 @@
+// SPDX-License-Identifier: GPL-2.0-only
+/*
+ * Copyright (C) 2023 ARM Ltd.
+ */
+
+#include <linux/kvm_host.h>
+#include <kvm/arm_hypercalls.h>
+#include <kvm/arm_psci.h>
+
+#include <asm/rmi_smc.h>
+#include <asm/kvm_emulate.h>
+#include <asm/kvm_rme.h>
+#include <asm/kvm_mmu.h>
+
+typedef int (*exit_handler_fn)(struct kvm_vcpu *vcpu);
+
+static int rec_exit_reason_notimpl(struct kvm_vcpu *vcpu)
+{
+ struct realm_rec *rec = &vcpu->arch.rec;
+
+ pr_err("[vcpu %d] Unhandled exit reason from realm (ESR: %#llx)\n",
+ vcpu->vcpu_id, rec->run->exit.esr);
+ return -ENXIO;
+}
+
+static int rec_exit_sync_dabt(struct kvm_vcpu *vcpu)
+{
+ return kvm_handle_guest_abort(vcpu);
+}
+
+static int rec_exit_sync_iabt(struct kvm_vcpu *vcpu)
+{
+ struct realm_rec *rec = &vcpu->arch.rec;
+
+ pr_err("[vcpu %d] Unhandled instruction abort (ESR: %#llx).\n",
+ vcpu->vcpu_id, rec->run->exit.esr);
+ return -ENXIO;
+}
+
+static int rec_exit_sys_reg(struct kvm_vcpu *vcpu)
+{
+ struct realm_rec *rec = &vcpu->arch.rec;
+ unsigned long esr = kvm_vcpu_get_esr(vcpu);
+ int rt = kvm_vcpu_sys_get_rt(vcpu);
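+ /* ISS bit 0 encodes the direction: 0 for a write (MSR), 1 for a read (MRS) */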
+ bool is_write = !(esr & 1);
+ int ret;
+
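+ /*
+ * The value the realm is writing is supplied in exit.gprs[0]; mirror
+ * it into the vcpu context so the standard sysreg emulation sees it.
+ */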
+ if (is_write)
+ vcpu_set_reg(vcpu, rt, rec->run->exit.gprs[0]);
+
+ ret = kvm_handle_sys_reg(vcpu);
+
+ if (ret >= 0 && !is_write)
+ rec->run->entry.gprs[0] = vcpu_get_reg(vcpu, rt);
+
+ return ret;
+}
+
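+/*
+ * Sync exits are dispatched on the ESR exception class; anything not
+ * listed below is reported as unhandled.
+ */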
+static exit_handler_fn rec_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = rec_exit_reason_notimpl,
+ [ESR_ELx_EC_SYS64] = rec_exit_sys_reg,
+ [ESR_ELx_EC_DABT_LOW] = rec_exit_sync_dabt,
+ [ESR_ELx_EC_IABT_LOW] = rec_exit_sync_iabt
+};
+
+static int rec_exit_psci(struct kvm_vcpu *vcpu)
+{
+ struct realm_rec *rec = &vcpu->arch.rec;
+ int i;
+ int ret;
+
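+ /* PSCI arguments are passed via the REC exit GPRs */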
+ for (i = 0; i < REC_RUN_GPRS; i++)
+ vcpu_set_reg(vcpu, i, rec->run->exit.gprs[i]);
+
+ ret = kvm_smccc_call_handler(vcpu);
+
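+ /* Copy the SMCCC return values back ready for the next REC entry */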
+ for (i = 0; i < REC_RUN_GPRS; i++)
+ rec->run->entry.gprs[i] = vcpu_get_reg(vcpu, i);
+
+ return ret;
+}
+
+static int rec_exit_ripas_change(struct kvm_vcpu *vcpu)
+{
+ struct kvm *kvm = vcpu->kvm;
+ struct realm *realm = &kvm->arch.realm;
+ struct realm_rec *rec = &vcpu->arch.rec;
+ unsigned long base = rec->run->exit.ripas_base;
+ unsigned long top = rec->run->exit.ripas_top;
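+ /* Only the bottom bit is significant: 0 is RIPAS_EMPTY, 1 is RIPAS_RAM */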
+ unsigned long ripas = rec->run->exit.ripas_value & 1;
+ int ret = -EINVAL;
+
+ if (realm_is_addr_protected(realm, base) &&
+ realm_is_addr_protected(realm, top - 1)) {
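+ /*
+ * Top up the page-table memory cache before taking the mmu_lock so
+ * that realm_set_ipa_state() doesn't need to allocate while holding
+ * it.
+ */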
+ kvm_mmu_topup_memory_cache(&vcpu->arch.mmu_page_cache,
+ kvm_mmu_cache_min_pages(vcpu->arch.hw_mmu));
+ write_lock(&kvm->mmu_lock);
+ ret = realm_set_ipa_state(vcpu, base, top, ripas);
+ write_unlock(&kvm->mmu_lock);
+ }
+
+ WARN(ret && ret != -ENOMEM,
+ "Unable to satisfy SET_IPAS for %#lx - %#lx, ripas: %#lx\n",
+ base, top, ripas);
+
+ /* Exit to VMM to complete the change */
+ kvm_prepare_memory_fault_exit(vcpu, base, top - base, false, false,
+ ripas == 1);
+
+ return 0;
+}
+
+static void update_arch_timer_irq_lines(struct kvm_vcpu *vcpu)
+{
+ struct realm_rec *rec = &vcpu->arch.rec;
+
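+ /* Mirror the timer state the RMM reported on exit into the vcpu */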
+ __vcpu_sys_reg(vcpu, CNTV_CTL_EL0) = rec->run->exit.cntv_ctl;
+ __vcpu_sys_reg(vcpu, CNTV_CVAL_EL0) = rec->run->exit.cntv_cval;
+ __vcpu_sys_reg(vcpu, CNTP_CTL_EL0) = rec->run->exit.cntp_ctl;
+ __vcpu_sys_reg(vcpu, CNTP_CVAL_EL0) = rec->run->exit.cntp_cval;
+
+ kvm_realm_timers_update(vcpu);
+}
+
+/*
+ * Return > 0 to return to guest, < 0 on error, 0 (and set exit_reason) on
+ * proper exit to userspace.
+ */
+int handle_rme_exit(struct kvm_vcpu *vcpu, int rec_run_ret)
+{
+ struct realm_rec *rec = &vcpu->arch.rec;
+ u8 esr_ec = ESR_ELx_EC(rec->run->exit.esr);
+ unsigned long status, index;
+
+ status = RMI_RETURN_STATUS(rec_run_ret);
+ index = RMI_RETURN_INDEX(rec_run_ret);
+
+ /*
+ * If a PSCI_SYSTEM_OFF request raced with a vcpu executing, we might
+ * see the following status code and index indicating an attempt to run
+ * a REC when the RD state is SYSTEM_OFF. In this case, we just need to
+ * return to user space, which can deal with the system event or try to
+ * run the KVM VCPU again; at that point we will no longer attempt to
+ * enter the Realm because a sleep request will be pending on the VCPU
+ * as a result of KVM's PSCI handling.
+ */
+ if (status == RMI_ERROR_REALM && index == 1) {
+ vcpu->run->exit_reason = KVM_EXIT_UNKNOWN;
+ return 0;
+ }
+
+ if (rec_run_ret)
+ return -ENXIO;
+
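+ /* Stash the fault information where the standard decode helpers expect it */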
+ vcpu->arch.fault.esr_el2 = rec->run->exit.esr;
+ vcpu->arch.fault.far_el2 = rec->run->exit.far;
+ vcpu->arch.fault.hpfar_el2 = rec->run->exit.hpfar;
+
+ update_arch_timer_irq_lines(vcpu);
+
+ /* Reset the emulation flags for the next run of the REC */
+ rec->run->entry.flags = 0;
+
+ switch (rec->run->exit.exit_reason) {
+ case RMI_EXIT_SYNC:
+ return rec_exit_handlers[esr_ec](vcpu);
+ case RMI_EXIT_IRQ:
+ case RMI_EXIT_FIQ:
+ return 1;
+ case RMI_EXIT_PSCI:
+ return rec_exit_psci(vcpu);
+ case RMI_EXIT_RIPAS_CHANGE:
+ return rec_exit_ripas_change(vcpu);
+ }