[PATCH v14 058/113] KVM: x86/mmu: Introduce kvm_mmu_map_tdp_page() for use by TDX

From: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
Date: Mon May 29 2023 - 00:27:44 EST


From: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>

Introduce a helper to directly (pun intended) fault in a TDP page
without having to go through the full page fault path. This allows
TDX to get the resulting pfn and also allows the RET_PF_* enums to
stay in mmu.c where they belong.
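
As an illustration, a caller could look roughly like the sketch below.
This sketch is not part of the patch: tdx_pre_fault_page(), the PFERR_*
composition, and the PG_LEVEL_2M cap are made-up assumptions for the
example; only kvm_mmu_map_tdp_page() itself is introduced by this series.

        /* Hypothetical caller: map a GPA and retrieve the backing pfn. */
        static int tdx_pre_fault_page(struct kvm_vcpu *vcpu, gpa_t gpa)
        {
                /* Treat the access as a guest-initiated write. */
                u32 error_code = PFERR_WRITE_MASK | PFERR_USER_MASK;
                kvm_pfn_t pfn;

                /* Cap the mapping at 2M; the MMU may still pick 4K. */
                pfn = kvm_mmu_map_tdp_page(vcpu, gpa, error_code,
                                           PG_LEVEL_2M);

                /* The helper fails iff the fault could not be fixed. */
                if (is_error_noslot_pfn(pfn))
                        return -EFAULT;

                return 0;
        }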

Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
 arch/x86/kvm/mmu.h     |  3 +++
 arch/x86/kvm/mmu/mmu.c | 49 ++++++++++++++++++++++++++++++++++++++++++
 2 files changed, 52 insertions(+)
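
As context for the error_code parameter: on TDX, the natural source is
the EPT-violation exit qualification. A hedged sketch of that conversion
follows; the helper name here is hypothetical and not part of this patch,
while the EPT_VIOLATION_ACC_* and PFERR_* macros are the existing kernel
definitions.

        /*
         * Hypothetical: translate a VMX EPT-violation exit qualification,
         * whose low bits encode the read/write/fetch access type, into
         * the PFERR_* bits that kvm_mmu_map_tdp_page() consumes.
         */
        static u32 ept_exit_qual_to_error_code(unsigned long exit_qual)
        {
                u32 error_code = 0;

                if (exit_qual & EPT_VIOLATION_ACC_WRITE)
                        error_code |= PFERR_WRITE_MASK;
                if (exit_qual & EPT_VIOLATION_ACC_INSTR)
                        error_code |= PFERR_FETCH_MASK;

                /* Guest accesses are treated as user-mode accesses. */
                return error_code | PFERR_USER_MASK;
        }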

diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index c1781e0d29e0..a613ef581059 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -174,6 +174,9 @@ static inline void kvm_mmu_refresh_passthrough_bits(struct kvm_vcpu *vcpu,
         __kvm_mmu_refresh_passthrough_bits(vcpu, mmu);
 }
 
+kvm_pfn_t kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa,
+                               u32 error_code, int max_level);
+
 /*
  * Check if a given access (described through the I/D, W/R and U/S bits of a
  * page fault error code pfec) causes a permission fault with the given PTE
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index c61b77981403..d6e900fb3927 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4666,6 +4666,55 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
         return direct_page_fault(vcpu, fault);
 }
 
+kvm_pfn_t kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa,
+                               u32 error_code, int max_level)
+{
+        int r;
+        struct kvm_page_fault fault = (struct kvm_page_fault) {
+                .addr = gpa,
+                .error_code = error_code,
+                .exec = error_code & PFERR_FETCH_MASK,
+                .write = error_code & PFERR_WRITE_MASK,
+                .present = error_code & PFERR_PRESENT_MASK,
+                .rsvd = error_code & PFERR_RSVD_MASK,
+                .user = error_code & PFERR_USER_MASK,
+                .prefetch = false,
+                .is_tdp = true,
+                .nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(vcpu->kvm),
+                .is_private = kvm_is_private_gpa(vcpu->kvm, gpa),
+        };
+
+        WARN_ON_ONCE(!vcpu->arch.mmu->root_role.direct);
+        fault.gfn = gpa_to_gfn(fault.addr) & ~kvm_gfn_shared_mask(vcpu->kvm);
+        fault.slot = kvm_vcpu_gfn_to_memslot(vcpu, fault.gfn);
+
+        if (mmu_topup_memory_caches(vcpu, false))
+                return KVM_PFN_ERR_FAULT;
+
+        /*
+         * Loop on the page fault path to handle the case where an mmu_notifier
+         * invalidation triggers RET_PF_RETRY. In the normal page fault path,
+         * KVM needs to resume the guest in case the invalidation changed any
+         * of the page fault properties, i.e. the gpa or error code. For this
+         * path, the gpa and error code are fixed by the caller, and the caller
+         * expects failure if and only if the page fault can't be fixed.
+         */
+        do {
+                fault.max_level = max_level;
+                fault.req_level = PG_LEVEL_4K;
+                fault.goal_level = PG_LEVEL_4K;
+
+#ifdef CONFIG_X86_64
+                if (tdp_mmu_enabled)
+                        r = kvm_tdp_mmu_page_fault(vcpu, &fault);
+                else
+#endif
+                        r = direct_page_fault(vcpu, &fault);
+        } while (r == RET_PF_RETRY && !is_error_noslot_pfn(fault.pfn));
+        return fault.pfn;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_map_tdp_page);
+
 static void nonpaging_init_context(struct kvm_mmu *context)
 {
         context->page_fault = nonpaging_page_fault;
--
2.25.1