[RFC PATCH 4/6] KVM: x86/mmu: Introduce kvm_mmu_map_tdp_page() for use by SEV/TDX
From: Nikunj A Dadhania
Date: Tue Jan 18 2022 - 06:07:12 EST
From: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Introduce a helper to directly (pun intended) fault-in a TDP page
without having to go through the full page fault path. This allows
SEV/TDX to pin pages before booting the guest, provides the resulting
pfn to vendor code should it be needed in the future, and allows the
RET_PF_* enums to stay in mmu.c where they belong.
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
Signed-off-by: Nikunj A Dadhania <nikunj@xxxxxxx>
---
arch/x86/kvm/mmu.h | 3 +++
arch/x86/kvm/mmu/mmu.c | 38 ++++++++++++++++++++++++++++++++++++++
2 files changed, 41 insertions(+)
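
For reference, a rough sketch of how SEV/TDX pre-boot pinning code might
drive the new helper; sev_pre_pin_memory() is hypothetical and only meant
to illustrate the intended calling convention, it is not part of this
series:

/*
 * Illustrative sketch only: walk a GPA range and fault in each 4K page
 * via the new helper so the backing pfns are populated (and can then be
 * pinned) before the guest boots. Not part of this series.
 */
static int sev_pre_pin_memory(struct kvm_vcpu *vcpu, gpa_t start, gpa_t end)
{
        kvm_pfn_t pfn;
        gpa_t gpa;

        for (gpa = start; gpa < end; gpa += PAGE_SIZE) {
                pfn = kvm_mmu_map_tdp_page(vcpu, gpa, PFERR_WRITE_MASK,
                                           PG_LEVEL_4K);
                if (is_error_noslot_pfn(pfn))
                        return -EFAULT;
        }
        return 0;
}

Note that on failure the helper hands back an error pfn rather than a
RET_PF_* value, which is what lets the RET_PF_* enums stay private to
mmu.c.
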
diff --git a/arch/x86/kvm/mmu.h b/arch/x86/kvm/mmu.h
index e9fbb2c8bbe2..0595891dd834 100644
--- a/arch/x86/kvm/mmu.h
+++ b/arch/x86/kvm/mmu.h
@@ -202,6 +202,9 @@ static inline int kvm_mmu_do_page_fault(struct kvm_vcpu *vcpu, gpa_t cr2_or_gpa,
 	return vcpu->arch.mmu->page_fault(vcpu, &fault);
 }
 
+kvm_pfn_t kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa,
+			       u32 error_code, int max_level);
+
 /*
  * Currently, we have two sorts of write-protection, a) the first one
  * write-protects guest page to sync the guest modification, b) another one is
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 62dda588eb99..de5d390e0dcc 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -4111,6 +4111,44 @@ int kvm_tdp_page_fault(struct kvm_vcpu *vcpu, struct kvm_page_fault *fault)
 	return direct_page_fault(vcpu, fault);
 }
 
+kvm_pfn_t kvm_mmu_map_tdp_page(struct kvm_vcpu *vcpu, gpa_t gpa,
+			       u32 error_code, int max_level)
+{
+	int r;
+	struct kvm_page_fault fault = (struct kvm_page_fault) {
+		.addr = gpa,
+		.error_code = error_code,
+		.exec = error_code & PFERR_FETCH_MASK,
+		.write = error_code & PFERR_WRITE_MASK,
+		.present = error_code & PFERR_PRESENT_MASK,
+		.rsvd = error_code & PFERR_RSVD_MASK,
+		.user = error_code & PFERR_USER_MASK,
+		.prefetch = false,
+		.is_tdp = true,
+		.nx_huge_page_workaround_enabled = is_nx_huge_page_enabled(),
+	};
+
+	if (mmu_topup_memory_caches(vcpu, false))
+		return KVM_PFN_ERR_FAULT;
+
+	/*
+	 * Loop on the page fault path to handle the case where an mmu_notifier
+	 * invalidation triggers RET_PF_RETRY. In the normal page fault path,
+	 * KVM needs to resume the guest in case the invalidation changed any
+	 * of the page fault properties, i.e. the gpa or error code. For this
+	 * path, the gpa and error code are fixed by the caller, and the caller
+	 * expects failure if and only if the page fault can't be fixed.
+	 */
+	do {
+		fault.max_level = max_level;
+		fault.req_level = PG_LEVEL_4K;
+		fault.goal_level = PG_LEVEL_4K;
+		r = direct_page_fault(vcpu, &fault);
+	} while (r == RET_PF_RETRY && !is_error_noslot_pfn(fault.pfn));
+	return fault.pfn;
+}
+EXPORT_SYMBOL_GPL(kvm_mmu_map_tdp_page);
+
 static void nonpaging_init_context(struct kvm_mmu *context)
 {
 	context->page_fault = nonpaging_page_fault;
--
2.32.0