[PATCH 6/7] KVM: x86: Implement kvm_arch_vcpu_map_memory()
From: Paolo Bonzini
Date: Wed Apr 17 2024 - 11:35:54 EST
From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
Wire the KVM_MAP_MEMORY ioctl to kvm_tdp_map_page() to populate guest
memory. KVM_CREATE_VCPU already initializes the x86 KVM MMU via
kvm_mmu_create() and kvm_init_mmu(), so by the time this ioctl can be
issued the vCPU is ready to invoke the KVM page fault handler.
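
As a usage illustration only (not part of this patch): a VMM could
prefault a GPA range on a vCPU fd roughly as below, after checking
KVM_CAP_MAP_MEMORY. The vcpu_fd parameter, the prefault_range() helper
name, the example arguments, and the EAGAIN/EINTR retry policy are
assumptions of the sketch; the struct fields match the code added by
this patch.

  #include <err.h>
  #include <errno.h>
  #include <sys/ioctl.h>
  #include <linux/kvm.h>	/* KVM_MAP_MEMORY, struct kvm_map_memory */

  static void prefault_range(int vcpu_fd, __u64 gpa, __u64 len)
  {
  	struct kvm_map_memory mapping = {
  		.base_address = gpa,
  		.size = len,
  	};

  	/*
  	 * The kernel advances base_address and shrinks size as it
  	 * makes progress, so retry until the whole range is mapped
  	 * or a hard error is returned.
  	 */
  	while (mapping.size) {
  		if (ioctl(vcpu_fd, KVM_MAP_MEMORY, &mapping) < 0 &&
  		    errno != EAGAIN && errno != EINTR)
  			err(1, "KVM_MAP_MEMORY");
  	}
  }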
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
Message-ID: <7138a3bc00ea8d3cbe0e59df15f8c22027005b59.1712785629.git.isaku.yamahata@xxxxxxxxx>
Signed-off-by: Paolo Bonzini <pbonzini@xxxxxxxxxx>
---
arch/x86/kvm/Kconfig | 1 +
arch/x86/kvm/x86.c | 43 +++++++++++++++++++++++++++++++++++++++++++
2 files changed, 44 insertions(+)
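
Note on the huge page arithmetic at the end of
kvm_arch_vcpu_map_memory() below: a single call consumes only up to the
end of the huge page containing base_address, which can be less than a
full huge page when base_address is not huge-page aligned. A standalone
sketch of the same computation (the open-coded 2M constants and the
example values are illustrative assumptions):

  #include <stdint.h>
  #include <stdio.h>

  /*
   * Illustrative stand-ins for KVM_HPAGE_SIZE()/KVM_HPAGE_MASK()
   * evaluated at PG_LEVEL_2M.
   */
  #define HPAGE_2M_SIZE	(1ULL << 21)
  #define HPAGE_2M_MASK	(~(HPAGE_2M_SIZE - 1))

  int main(void)
  {
  	uint64_t base = 0x1ff000;	/* 4K-aligned, 4K below a 2M boundary */
  	uint64_t size = 0x400000;	/* 4 MiB requested */

  	/* Same computation as the patch, assuming level == PG_LEVEL_2M. */
  	uint64_t end = (base & HPAGE_2M_MASK) + HPAGE_2M_SIZE;
  	uint64_t mapped = size < end - base ? size : end - base;

  	/* Prints: mapped=0x1000 next=0x200000 remaining=0x3ff000 */
  	printf("mapped=%#llx next=%#llx remaining=%#llx\n",
  	       (unsigned long long)mapped,
  	       (unsigned long long)(base + mapped),
  	       (unsigned long long)(size - mapped));
  	return 0;
  }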
diff --git a/arch/x86/kvm/Kconfig b/arch/x86/kvm/Kconfig
index 7632fe6e4db9..e58360d368ec 100644
--- a/arch/x86/kvm/Kconfig
+++ b/arch/x86/kvm/Kconfig
@@ -44,6 +44,7 @@ config KVM
 	select KVM_VFIO
 	select HAVE_KVM_PM_NOTIFIER if PM
 	select KVM_GENERIC_HARDWARE_ENABLING
+	select KVM_GENERIC_MAP_MEMORY
 	help
 	  Support hosting fully virtualized guest machines using hardware
 	  virtualization extensions. You will need a fairly recent
diff --git a/arch/x86/kvm/x86.c b/arch/x86/kvm/x86.c
index 83b8260443a3..f84c75c2a47f 100644
--- a/arch/x86/kvm/x86.c
+++ b/arch/x86/kvm/x86.c
@@ -4715,6 +4715,9 @@ int kvm_vm_ioctl_check_extension(struct kvm *kvm, long ext)
 	case KVM_CAP_MEMORY_FAULT_INFO:
 		r = 1;
 		break;
+	case KVM_CAP_MAP_MEMORY:
+		r = tdp_enabled;
+		break;
 	case KVM_CAP_EXIT_HYPERCALL:
 		r = KVM_EXIT_HYPERCALL_VALID_MASK;
 		break;
@@ -5867,6 +5870,46 @@ static int kvm_vcpu_ioctl_enable_cap(struct kvm_vcpu *vcpu,
 	}
 }
 
+int kvm_arch_vcpu_map_memory(struct kvm_vcpu *vcpu,
+			     struct kvm_map_memory *mapping)
+{
+	u64 mapped, end, error_code = 0;
+	u8 level = PG_LEVEL_4K;
+	int r;
+
+	/*
+	 * Shadow paging uses GVA for KVM page faults; to avoid confusion,
+	 * the first implementation supports GPA only.
+	 */
+	if (!tdp_enabled)
+		return -EOPNOTSUPP;
+
+	/*
+	 * kvm_mmu_reload() is cheap when the MMU is already loaded, so
+	 * it is safe to call on every iteration.
+	 */
+	kvm_mmu_reload(vcpu);
+
+	if (kvm_arch_has_private_mem(vcpu->kvm) &&
+	    kvm_mem_is_private(vcpu->kvm, gpa_to_gfn(mapping->base_address)))
+		error_code |= PFERR_PRIVATE_ACCESS;
+
+	r = kvm_tdp_map_page(vcpu, mapping->base_address, error_code, &level);
+	if (r)
+		return r;
+
+	/*
+	 * The mapped level can exceed the alignment of mapping->base_address
+	 * when the fault is satisfied with a huge page.
+	 */
+	end = (mapping->base_address & KVM_HPAGE_MASK(level)) +
+	      KVM_HPAGE_SIZE(level);
+	mapped = min(mapping->size, end - mapping->base_address);
+	mapping->size -= mapped;
+	mapping->base_address += mapped;
+	return r;
+}
+
 long kvm_arch_vcpu_ioctl(struct file *filp,
 			 unsigned int ioctl, unsigned long arg)
 {
--
2.43.0