[PATCH 06/14] KVM: x86/mmu: Refactor THP adjust to prep for changing query
From: Sean Christopherson
Date: Wed Jan 08 2020 - 15:27:53 EST
Refactor transparent_hugepage_adjust() in preparation for walking the
host page tables to identify hugepage mappings, initially for THP pages,
and eventually for HugeTLB and DAX-backed pages as well. The latter
cases support 1GB pages, i.e. the adjustment logic needs access to the
max allowed level.
Signed-off-by: Sean Christopherson <sean.j.christopherson@xxxxxxxxx>
---
arch/x86/kvm/mmu/mmu.c | 44 +++++++++++++++++-----------------
arch/x86/kvm/mmu/paging_tmpl.h | 3 +--
2 files changed, 23 insertions(+), 24 deletions(-)
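A minimal sketch of the resulting call pattern, for reference while reading
the diff below. The caller body here is hypothetical and abbreviated; only
transparent_hugepage_adjust()'s new signature and the dropped
PT_PAGE_TABLE_LEVEL guard are taken from this patch:

	/*
	 * Sketch only: the PT_PAGE_TABLE_LEVEL guard moves out of the
	 * callers and into transparent_hugepage_adjust(), which now takes
	 * the caller's max allowed mapping level and returns early when
	 * no THP adjustment is possible or allowed.
	 */
	static int sketch_direct_map(struct kvm_vcpu *vcpu, gpa_t gpa,
				     gfn_t gfn, int max_level, kvm_pfn_t pfn)
	{
		int level = PT_PAGE_TABLE_LEVEL;

		/* No "if (max_level > PT_PAGE_TABLE_LEVEL)" wrapper needed. */
		transparent_hugepage_adjust(vcpu, gfn, max_level, &pfn, &level);

		/*
		 * If the adjustment fired, level is now PT_DIRECTORY_LEVEL
		 * and pfn is rounded down to the hugepage boundary; map at
		 * whichever level was chosen.
		 */
		return 0;
	}

As a concrete instance of the alignment math at the end of the helper: with
4KB base pages, KVM_PAGES_PER_HPAGE(PT_DIRECTORY_LEVEL) is 512, so mask is
0x1ff; e.g. pfn 0x12345 and gfn 0x08345 share the low nine bits (0x145),
satisfying the VM_BUG_ON, and *pfnp becomes 0x12200, the start of the
2MB-aligned hugepage.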
diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 8ca6cd04cdf1..30836899be73 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -3329,33 +3329,34 @@ static void direct_pte_prefetch(struct kvm_vcpu *vcpu, u64 *sptep)
__direct_pte_prefetch(vcpu, sp, sptep);
}
-static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu,
- gfn_t gfn, kvm_pfn_t *pfnp,
+static void transparent_hugepage_adjust(struct kvm_vcpu *vcpu, gfn_t gfn,
+ int max_level, kvm_pfn_t *pfnp,
int *levelp)
{
kvm_pfn_t pfn = *pfnp;
int level = *levelp;
+ kvm_pfn_t mask;
+
+ if (max_level == PT_PAGE_TABLE_LEVEL || level > PT_PAGE_TABLE_LEVEL)
+ return;
+
+ if (is_error_noslot_pfn(pfn) || kvm_is_reserved_pfn(pfn) ||
+ kvm_is_zone_device_pfn(pfn))
+ return;
+
+ if (!kvm_is_transparent_hugepage(pfn))
+ return;
+
+ level = PT_DIRECTORY_LEVEL;
/*
- * Check if it's a transparent hugepage. If this would be an
- * hugetlbfs page, level wouldn't be set to
- * PT_PAGE_TABLE_LEVEL and there would be no adjustment done
- * here.
+ * mmu_notifier_retry() was successful and mmu_lock is held, so
+ * the pmd can't be split from under us.
*/
- if (!is_error_noslot_pfn(pfn) && !kvm_is_reserved_pfn(pfn) &&
- !kvm_is_zone_device_pfn(pfn) && level == PT_PAGE_TABLE_LEVEL &&
- kvm_is_transparent_hugepage(pfn)) {
- unsigned long mask;
-
- /*
- * mmu_notifier_retry() was successful and mmu_lock is held, so
- * the pmd can't be split from under us.
- */
- *levelp = level = PT_DIRECTORY_LEVEL;
- mask = KVM_PAGES_PER_HPAGE(level) - 1;
- VM_BUG_ON((gfn & mask) != (pfn & mask));
- *pfnp = pfn & ~mask;
- }
+ *levelp = level;
+ mask = KVM_PAGES_PER_HPAGE(level) - 1;
+ VM_BUG_ON((gfn & mask) != (pfn & mask));
+ *pfnp = pfn & ~mask;
}
static void disallowed_hugepage_adjust(struct kvm_shadow_walk_iterator it,
@@ -3395,8 +3396,7 @@ static int __direct_map(struct kvm_vcpu *vcpu, gpa_t gpa, int write,
if (WARN_ON(!VALID_PAGE(vcpu->arch.mmu->root_hpa)))
return RET_PF_RETRY;
- if (likely(max_level > PT_PAGE_TABLE_LEVEL))
- transparent_hugepage_adjust(vcpu, gfn, &pfn, &level);
+ transparent_hugepage_adjust(vcpu, gfn, max_level, &pfn, &level);
trace_kvm_mmu_spte_requested(gpa, level, pfn);
for_each_shadow_entry(vcpu, gpa, it) {
diff --git a/arch/x86/kvm/mmu/paging_tmpl.h b/arch/x86/kvm/mmu/paging_tmpl.h
index b53bed3c901c..0029f7870865 100644
--- a/arch/x86/kvm/mmu/paging_tmpl.h
+++ b/arch/x86/kvm/mmu/paging_tmpl.h
@@ -673,8 +673,7 @@ static int FNAME(fetch)(struct kvm_vcpu *vcpu, gpa_t addr,
gfn = gw->gfn | ((addr & PT_LVL_OFFSET_MASK(gw->level)) >> PAGE_SHIFT);
base_gfn = gfn;
- if (max_level > PT_PAGE_TABLE_LEVEL)
- transparent_hugepage_adjust(vcpu, gw->gfn, &pfn, &hlevel);
+ transparent_hugepage_adjust(vcpu, gw->gfn, max_level, &pfn, &hlevel);
trace_kvm_mmu_spte_requested(addr, gw->level, pfn);
--
2.24.1