[PATCH v1 23/27] KVM: s390: arm64: Implement required functions

From: Steffen Eiden

Date: Thu Apr 02 2026 - 00:50:03 EST


Implement the mostly trivial functions that the shared arm64 (kvm)
headers require s390 to provide.

Implement a very basic SMCCC handler that (non-compliantly) only
supports stopping a vCPU.

Signed-off-by: Steffen Eiden <seiden@xxxxxxxxxxxxx>
---
arch/s390/include/asm/kvm_emulate.h | 135 ++++++++++++++++++++++++++++
arch/s390/include/asm/kvm_mmu.h | 12 +++
arch/s390/include/asm/kvm_nested.h | 13 +++
arch/s390/kvm/arm64/handle_exit.c | 50 +++++++++++
arch/s390/kvm/arm64/inject_fault.c | 15 ++++
5 files changed, 225 insertions(+)
create mode 100644 arch/s390/include/asm/kvm_emulate.h
create mode 100644 arch/s390/include/asm/kvm_mmu.h
create mode 100644 arch/s390/include/asm/kvm_nested.h
create mode 100644 arch/s390/kvm/arm64/handle_exit.c
create mode 100644 arch/s390/kvm/arm64/inject_fault.c

diff --git a/arch/s390/include/asm/kvm_emulate.h b/arch/s390/include/asm/kvm_emulate.h
new file mode 100644
index 000000000000..bf019005e137
--- /dev/null
+++ b/arch/s390/include/asm/kvm_emulate.h
@@ -0,0 +1,135 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Emulation functionality for arm64 guests.
+ */
+
+#ifndef __S390_ARM64_KVM_EMULATE_H__
+#define __S390_ARM64_KVM_EMULATE_H__
+
+#include <asm/fault.h>
+#include <asm/ptrace.h>
+#include <linux/kvm_host.h>
+
+#include <kvm/arm64/kvm_arm.h>
+#include <kvm/arm64/kvm_emulate.h>
+
+/* Writable pointer to the guest program counter saved in the sae_block. */
+static __always_inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu->arch.sae_block.pc;
+}
+
+/* Writable pointer to the guest PSTATE (exposed under the arm64 "cpsr" name). */
+static __always_inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu->arch.sae_block.pstate;
+}
+
+/* Writable pointer to the guest SP_EL0 saved in the sae_block. */
+static __always_inline unsigned long *vcpu_sp_el0(const struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu->arch.sae_block.sp_el0;
+}
+
+/* AArch32 guest execution is never available on s390. */
+static __always_inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+/* Exception syndrome of the last trap, taken from the sae_block abort info. */
+static __always_inline u64 kvm_vcpu_get_esr(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sae_block.hai.esr_elz;
+}
+
+/* Faulting virtual address of the last trap, from the sae_block abort info. */
+static __always_inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sae_block.hai.far_elz;
+}
+
+/*
+ * Faulting intermediate physical address, reconstructed from the page
+ * frame number in the TEID.  Widen to phys_addr_t before shifting so the
+ * result cannot be truncated if teid.addr is a narrower bitfield.
+ */
+static __always_inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
+{
+ return (phys_addr_t)vcpu->arch.sae_block.hai.teid.addr << PAGE_SHIFT;
+}
+
+/* Program interruption code of the fault, masked to the code bits. */
+static inline u16 kvm_vcpu_fault_pic(const struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sae_block.hai.pic & PGM_INT_CODE_MASK;
+}
+
+/*
+ * Should be unreachable: arm64 on s390 does not claim
+ * KVM_CAP_ARM_NISV_TO_USER.
+ */
+static inline unsigned long kvm_vcpu_dabt_iss_nisv_sanitized(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_get_esr(vcpu) & (ESR_ELx_CM | ESR_ELx_WNR);
+}
+
+/* A protection program interruption is reported as a permission fault. */
+static __always_inline
+bool kvm_vcpu_trap_is_permission_fault(const struct kvm_vcpu *vcpu)
+{
+ return kvm_vcpu_fault_pic(vcpu) == PGM_PROTECTION;
+}
+
+/* Without AArch32 there are no conditional trapped instructions to reject. */
+static __always_inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
+{
+ return true;
+}
+
+/* EL1 is always 64-bit; 32-bit EL1 is not supported on s390. */
+static __always_inline bool vcpu_el1_is_32bit(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+/*
+ * Initial HCR value: base configuration (E2H, RW, PTW) plus the
+ * SMC and ID-register trap bits.
+ */
+static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
+{
+ vcpu->arch.hcr_elz = HCR_E2H | HCR_RW | HCR_PTW |
+        /* traps */
+        HCR_TSC | HCR_TID1 | HCR_TID2 | HCR_TID3 |
+        HCR_TID4 | HCR_TID5 | HCR_TIDCP;
+}
+
+/*
+ * VSESR_EL2 is only meaningful with FEAT_RAS, which is not implemented.
+ * Warn once (not on every call) so a stray caller is visible without
+ * flooding the log.
+ */
+static inline unsigned long vcpu_get_vsesr(struct kvm_vcpu *vcpu)
+{
+ WARN_ONCE(1, "VSESR_EL2 not implemented (only needed with FEAT_RAS)");
+
+ return 0;
+}
+
+/* Setter counterpart: discard the value, warn once (see vcpu_get_vsesr). */
+static inline void vcpu_set_vsesr(struct kvm_vcpu *vcpu, u64 vsesr)
+{
+ WARN_ONCE(1, "VSESR_EL2 not implemented (only needed with FEAT_RAS)");
+}
+
+/* Writable pointer to the vCPU's HCR shadow value. */
+static inline unsigned long *vcpu_hcr(struct kvm_vcpu *vcpu)
+{
+ return (unsigned long *)&vcpu->arch.hcr_elz;
+}
+
+/* No nested virtualization, so a virtual EL2 TGE bit can never be set. */
+static inline bool vcpu_el2_tge_is_set(const struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+/* Big-endian guest data accesses are never reported on s390's arm64 mode. */
+static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+/*
+ * Raw gltl field from the abort info — presumably the guest's lookup/
+ * translation level; NOTE(review): confirm against the sae_block layout.
+ */
+static inline int kvm_vcpu_abt_gltl(struct kvm_vcpu *vcpu)
+{
+ return vcpu->arch.sae_block.hai.gltl;
+}
+
+/* Without nested virt the guest never runs in a (virtual) EL2 context. */
+static inline bool is_hyp_ctxt(const struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+/* Nested virtualization is not supported, so never a nested context. */
+static inline bool is_nested_ctxt(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+/* True unless the guest was executing at EL0 (unprivileged). */
+static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
+{
+ return (*vcpu_cpsr(vcpu) & PSR_MODE_MASK) != PSR_MODE_EL0t;
+}
+
+#endif /* __S390_ARM64_KVM_EMULATE_H__ */
diff --git a/arch/s390/include/asm/kvm_mmu.h b/arch/s390/include/asm/kvm_mmu.h
new file mode 100644
index 000000000000..ac354fd5bac9
--- /dev/null
+++ b/arch/s390/include/asm/kvm_mmu.h
@@ -0,0 +1,12 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * KVM MMU for arm64 guests.
+ */
+#ifndef __S390_ARM64_KVM_MMU_H__
+#define __S390_ARM64_KVM_MMU_H__
+
+#include <linux/kvm_host.h>
+
+#include <kvm/arm64/kvm_mmu.h>
+
+#endif /* __S390_ARM64_KVM_MMU_H__ */
diff --git a/arch/s390/include/asm/kvm_nested.h b/arch/s390/include/asm/kvm_nested.h
new file mode 100644
index 000000000000..7158932e718b
--- /dev/null
+++ b/arch/s390/include/asm/kvm_nested.h
@@ -0,0 +1,13 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/*
+ * Nested KVM for arm64 guests. (Not supported by s390)
+ */
+#ifndef ASM_KVM_NESTED_H
+#define ASM_KVM_NESTED_H
+
+/* s390 never offers nested virtualization to arm64 guests. */
+static inline bool vcpu_has_nv(const struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+#endif /* ASM_KVM_NESTED_H */
diff --git a/arch/s390/kvm/arm64/handle_exit.c b/arch/s390/kvm/arm64/handle_exit.c
new file mode 100644
index 000000000000..89933a604876
--- /dev/null
+++ b/arch/s390/kvm/arm64/handle_exit.c
@@ -0,0 +1,50 @@
+// SPDX-License-Identifier: GPL-2.0
+#include <linux/kvm_host.h>
+
+#include <uapi/linux/psci.h>
+
+#include <asm/esr.h>
+#include <asm/kvm_emulate.h>
+
+#include <kvm/arm64/handle_exit.h>
+
+/*
+ * Temporary smc/hvc handler. Non-compliant implementation (features missing).
+ * Implements only PSCI SYSTEM_OFF so that test programs are able to end
+ * their execution.  Function IDs and return codes come from
+ * <uapi/linux/psci.h> rather than local redefinitions.
+ *
+ * Returns 1 to continue running the guest, 0 to exit to userspace.
+ */
+static int kvm_smccc_call_handler(struct kvm_vcpu *vcpu)
+{
+ u32 func_id = vcpu_get_reg(vcpu, 0);
+ u64 val = PSCI_RET_NOT_SUPPORTED;
+ int ret = 1;
+
+ if (func_id == PSCI_0_2_FN_SYSTEM_OFF) {
+ /* Stop this vCPU and kick all vCPUs out of the guest. */
+ spin_lock(&vcpu->arch.mp_state_lock);
+ WRITE_ONCE(vcpu->arch.mp_state.mp_state, KVM_MP_STATE_STOPPED);
+ spin_unlock(&vcpu->arch.mp_state_lock);
+ kvm_make_all_cpus_request(vcpu->kvm, KVM_REQ_SLEEP);
+
+ /* Report the shutdown to userspace. */
+ memset(&vcpu->run->system_event, 0,
+ sizeof(vcpu->run->system_event));
+ vcpu->run->system_event.type = KVM_SYSTEM_EVENT_SHUTDOWN;
+ vcpu->run->system_event.ndata = 1;
+ vcpu->run->system_event.data[0] = 0;
+ vcpu->run->exit_reason = KVM_EXIT_SYSTEM_EVENT;
+ /* Error code in case the guest is resumed regardless. */
+ val = PSCI_RET_INTERNAL_FAILURE;
+ ret = 0;
+ }
+ vcpu_set_reg(vcpu, 0, val);
+
+ return ret;
+}
+
+/* Count the HVC exit and forward the call to the minimal SMCCC handler. */
+static int handle_hvc(struct kvm_vcpu *vcpu)
+{
+ vcpu->stat.hvc_exit_stat++;
+ return kvm_smccc_call_handler(vcpu);
+}
+
+/*
+ * Exit dispatch table indexed by exception class.  Only HVC from 64-bit
+ * state is handled; every other class falls through to the unknown-EC
+ * handler.
+ */
+exit_handle_fn arm_exit_handlers[] = {
+ [0 ... ESR_ELx_EC_MAX] = kvm_handle_unknown_ec,
+ [ESR_ELx_EC_HVC64] = handle_hvc,
+};
diff --git a/arch/s390/kvm/arm64/inject_fault.c b/arch/s390/kvm/arm64/inject_fault.c
new file mode 100644
index 000000000000..d4058c3f226e
--- /dev/null
+++ b/arch/s390/kvm/arm64/inject_fault.c
@@ -0,0 +1,15 @@
+// SPDX-License-Identifier: GPL-2.0
+
+#include <asm/kvm_emulate.h>
+
+/**
+ * kvm_inject_undefined - inject an undefined instruction into the guest
+ * @vcpu: The vCPU in which to inject the exception
+ *
+ * It is assumed that this code is called from the VCPU thread and that the
+ * VCPU therefore is not currently executing guest code.
+ *
+ * Currently a no-op: injection needs sysreg emulation, which s390 does not
+ * provide yet.
+ */
+void kvm_inject_undefined(struct kvm_vcpu *vcpu)
+{
+ /* Stub until s390 supports arm64 sysregs. TODO: sysregs */
+}
--
2.51.0