[PATCH v1 24/27] KVM: s390: arm64: Implement vm/vcpu create/destroy

From: Steffen Eiden

Date: Thu Apr 02 2026 - 00:31:12 EST


Implement the init and destroy ioctls for vcpu and vm.
Implement the arch vm ioctl. Use the s390 gmap.

Co-developed-by: Janosch Frank <frankja@xxxxxxxxxxxxx>
Signed-off-by: Janosch Frank <frankja@xxxxxxxxxxxxx>
Co-developed-by: Andreas Grapentin <gra@xxxxxxxxxxxxx>
Signed-off-by: Andreas Grapentin <gra@xxxxxxxxxxxxx>
Co-developed-by: Nina Schoetterl-Glausch <nsg@xxxxxxxxxxxxx>
Signed-off-by: Nina Schoetterl-Glausch <nsg@xxxxxxxxxxxxx>
Signed-off-by: Steffen Eiden <seiden@xxxxxxxxxxxxx>
---
arch/s390/kvm/arm64/arm.c | 160 ++++++++++++++++++++++++++++++++++++++
arch/s390/kvm/arm64/arm.h | 54 +++++++++++++
2 files changed, 214 insertions(+)

diff --git a/arch/s390/kvm/arm64/arm.c b/arch/s390/kvm/arm64/arm.c
index 8f94eb8fe288..962d23f4e469 100644
--- a/arch/s390/kvm/arm64/arm.c
+++ b/arch/s390/kvm/arm64/arm.c
@@ -9,6 +9,8 @@
#include <linux/kvm_host.h>

#include "arm.h"
+#include "reset.h"
+#include "gmap.h"

int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
{
@@ -41,6 +43,61 @@ static u64 kvm_max_guest_address(void)
return ALIGN_DOWN(max_addr + 1, 1 << 30) - 1;
}

+static int kvm_gmap_init(struct kvm *kvm)
+{
+ struct crst_table *table;
+
+ kvm->arch.gmap = gmap_new(kvm, kvm->arch.guest_phys_size);
+
+ if (!kvm->arch.gmap)
+ return -ENOMEM;
+
+ /* arm64 (on s390) does not have pfault */
+ clear_bit(GMAP_FLAG_PFAULT_ENABLED, &kvm->arch.gmap->flags);
+ set_bit(GMAP_FLAG_ALLOW_HPAGE_1M, &kvm->arch.gmap->flags);
+
+ table = dereference_asce(kvm->arch.gmap->asce);
+ crst_table_init((void *)table, _CRSTE_HOLE(table->crstes[0].h.tt).val);
+
+ return 0;
+}
+
+int kvm_arch_init_vm(struct kvm *kvm, unsigned long type)
+{
+ char debug_name[32];
+ int ret;
+
+ if (type & ~KVM_VM_TYPE_ARM_IPA_SIZE_MASK)
+ return -EINVAL;
+
+ ret = kvm_vm_type_ipa_size_shift(type);
+ if (ret < 0)
+ return ret;
+ kvm->arch.guest_phys_size = 1UL << ret;
+
+ mutex_init(&kvm->arch.config_lock);
+ bitmap_zero(kvm->arch.vcpu_features, KVM_VCPU_MAX_FEATURES);
+
+ snprintf(debug_name, sizeof(debug_name), "kvm-arm64-%u", current->pid);
+ kvm->arch.dbf = debug_register(debug_name, 32, 1, 7 * sizeof(long));
+ if (!kvm->arch.dbf)
+ return -ENOMEM;
+ debug_register_view(kvm->arch.dbf, &debug_sprintf_view);
+
+ ret = kvm_gmap_init(kvm);
+ if (ret)
+ goto out_err;
+ kvm->arch.mem_limit = kvm_max_guest_address();
+
+ VM_EVENT(kvm, 3, "vm created with type %lu", type);
+ return 0;
+
+out_err:
+ debug_unregister(kvm->arch.dbf);
+
+ return ret;
+}
+
vm_fault_t kvm_arch_vcpu_fault(struct kvm_vcpu *vcpu, struct vm_fault *vmf)
{
return VM_FAULT_SIGBUS;
@@ -52,6 +109,13 @@ long kvm_arch_dev_ioctl(struct file *filp,
return -EINVAL;
}

+void kvm_arch_destroy_vm(struct kvm *kvm)
+{
+ kvm_destroy_vcpus(kvm);
+ debug_unregister(kvm->arch.dbf);
+ kvm->arch.gmap = gmap_put(kvm->arch.gmap);
+}
+
u32 get_kvm_ipa_limit(void)
{
return fls64(kvm_max_guest_address() + 1) - 1;
@@ -62,10 +126,39 @@ int kvm_arch_vcpu_precreate(struct kvm *kvm, unsigned int id)
return 0;
}

+int kvm_arch_vcpu_create(struct kvm_vcpu *vcpu)
+{
+ struct kvm_sae_block *sae_block = &vcpu->arch.sae_block;
+
+ spin_lock_init(&vcpu->arch.mp_state_lock);
+
+ /* Force users to call KVM_ARM_VCPU_INIT */
+ vcpu_clear_flag(vcpu, VCPU_INITIALIZED);
+
+ vcpu->arch.mc = kvm_s390_new_mmu_cache();
+ if (!vcpu->arch.mc)
+ return -ENOMEM;
+
+ sae_block->hbasce = vcpu->kvm->arch.gmap->asce.val;
+ sae_block->mso = 0L;
+ sae_block->msl = kvm_max_guest_address();
+
+ VM_EVENT(vcpu->kvm, 3, "create cpu %d at 0x%p, sae block at 0x%p, satellite at 0x%p",
+ vcpu->vcpu_id, vcpu, &vcpu->arch.sae_block, &vcpu->arch.save_area);
+ return 0;
+}
+
void kvm_arch_vcpu_postcreate(struct kvm_vcpu *vcpu)
{
}

+void kvm_arch_vcpu_destroy(struct kvm_vcpu *vcpu)
+{
+ kvm_s390_free_mmu_cache(vcpu->arch.mc);
+
+ VCPU_EVENT(vcpu, 3, "%s", "free cpu");
+}
+
void kvm_arch_vcpu_blocking(struct kvm_vcpu *vcpu)
{
}
@@ -103,6 +196,52 @@ int kvm_vm_ioctl_irq_line(struct kvm *kvm, struct kvm_irq_level *irq_level,
return 0;
}

+int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
+ struct kvm_dirty_log *log)
+{
+ return gmap_get_dirty_log(kvm, log);
+}
+
+bool kvm_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ scoped_guard(read_lock, &kvm->mmu_lock)
+ return gmap_age_gfn(kvm->arch.gmap, range->start, range->end);
+}
+
+void kvm_arch_sync_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot)
+{
+ gfn_t last_gfn = memslot->base_gfn + memslot->npages;
+
+ scoped_guard(read_lock, &kvm->mmu_lock)
+ gmap_sync_dirty_log(kvm->arch.gmap, memslot->base_gfn, last_gfn);
+}
+
+int kvm_arch_prepare_memory_region(struct kvm *kvm,
+ const struct kvm_memory_slot *old,
+ struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
+{
+ return gmap_prepare_memory_region(kvm, old, new, change);
+}
+
+void kvm_arch_commit_memory_region(struct kvm *kvm,
+ struct kvm_memory_slot *old,
+ const struct kvm_memory_slot *new,
+ enum kvm_mr_change change)
+{
+ gmap_commit_memory_region(kvm, old, new, change);
+}
+
+bool kvm_unmap_gfn_range(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ return gmap_unmap_gfn_range(kvm->arch.gmap, range->slot, range->start, range->end);
+}
+
+bool kvm_test_age_gfn(struct kvm *kvm, struct kvm_gfn_range *range)
+{
+ return gmap_test_age_gfn(kvm, range);
+}
+
void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
struct kvm_memory_slot *slot,
gfn_t gfn_offset,
@@ -110,6 +249,27 @@ void kvm_arch_mmu_enable_log_dirty_pt_masked(struct kvm *kvm,
{
}

+int kvm_arch_vm_ioctl(struct file *filp, unsigned int ioctl, unsigned long arg)
+{
+ void __user *argp = (void __user *)arg;
+
+ switch (ioctl) {
+ case KVM_ARM_PREFERRED_TARGET: {
+ struct kvm_vcpu_init init = {
+ .target = KVM_ARM_TARGET_GENERIC_V8,
+ };
+
+ if (copy_to_user(argp, &init, sizeof(init)))
+ return -EFAULT;
+
+ return 0;
+ }
+
+ default:
+ return -EINVAL;
+ }
+}
+
bool kvm_arch_irqchip_in_kernel(struct kvm *kvm)
{
return false;
diff --git a/arch/s390/kvm/arm64/arm.h b/arch/s390/kvm/arm64/arm.h
index a3db254462c0..9571e6e1851a 100644
--- a/arch/s390/kvm/arm64/arm.h
+++ b/arch/s390/kvm/arm64/arm.h
@@ -4,4 +4,58 @@

#define KVM_DEV_NAME "kvm-arm64"

+#define VM_EVENT(d_kvm, d_loglevel, d_string, d_args...)\
+do { \
+ debug_sprintf_event((d_kvm)->arch.dbf, d_loglevel, KVM_DEV_NAME ": " d_string "\n", d_args); \
+} while (0)
+
+#define VCPU_EVENT(d_vcpu, d_loglevel, d_string, d_args...) \
+ do { \
+ debug_sprintf_event( \
+ (d_vcpu)->kvm->arch.dbf, d_loglevel, \
+ KVM_DEV_NAME " %02d[%016llx-%016llx]: " d_string "\n", \
+ (d_vcpu)->vcpu_id, (d_vcpu)->arch.sae_block.pstate, \
+ (d_vcpu)->arch.sae_block.pc, d_args); \
+ } while (0)
+
+static __always_inline bool kvm_s390_is_in_sie(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+static __always_inline int kvm_is_ucontrol(struct kvm *kvm)
+{
+ return 0;
+}
+
+static __always_inline int __kvm_s390_pv_destroy_page(struct page *page)
+{
+ return 0;
+}
+
+static __always_inline void kvm_s390_vsie_gmap_notifier(struct gmap *gmap, gpa_t start, gpa_t end)
+{
+}
+
+static __always_inline int kvm_s390_pv_get_handle(struct kvm *kvm)
+{
+ return 0;
+}
+
+static __always_inline int kvm_s390_is_migration_mode(struct kvm *kvm)
+{
+ return false;
+}
+
+static __always_inline bool kvm_arch_setup_async_pf(struct kvm_vcpu *vcpu)
+{
+ return false;
+}
+
+/* should never be called */
+static __always_inline int kvm_s390_vm_stop_migration(struct kvm *kvm)
+{
+ return -EINVAL;
+}
+
#endif /* ARCH_S390_KVM_ARM64_H */
--
2.51.0