[PATCH v8 09/14] KVM: x86/tdp_mmu: Split the large page when zapping leaf SPTEs

From: isaku.yamahata
Date: Mon Feb 26 2024 - 04:14:05 EST


From: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>

When TDX is enabled, a large page cannot be zapped if it contains mixed
(i.e. both private and shared) pages. In that case, the large page has to
be split before its leaf SPTEs can be zapped.
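
In tdp_mmu_zap_leafs(), the new path for a private root boils down to the
following condensed sketch (a paraphrase of the hunk below with the
lock-yield and error handling trimmed; the variable and helper names are
the ones used in the hunk):

  if (is_private && kvm_gfn_shared_mask(kvm) && is_large_pte(iter.old_spte)) {
          /*
           * Split first if the large page maps both private and shared
           * (mixed) pages, or only partially overlaps [start, end).
           */
          if (kvm_hugepage_test_mixed(slot, gfn, iter.level) ||
              (gfn & mask) < start ||
              end < (gfn & mask) + KVM_PAGES_PER_HPAGE(iter.level)) {
                  /* Reuse a leftover split_sp, otherwise allocate one. */
                  sp = split_sp ?: tdp_mmu_alloc_sp_for_split(kvm, &iter, false);
                  tdp_mmu_init_sp(sp, iter.sptep, iter.gfn);
                  if (tdp_mmu_split_huge_page(kvm, &iter, sp, false))
                          iter.yielded = true;    /* force a retry on this gfn */
                  continue;   /* the child SPTEs are zapped on later iterations */
          }
  }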

Signed-off-by: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>
---
v7:
- Remove unnecessary TLB shootdown in tdp_mmu_zap_leafs() to free the unused
split_sp.
Signed-off-by: Isaku Yamahata <isaku.yamahata@xxxxxxxxx>
---
arch/x86/kvm/mmu/mmu.c | 6 ++--
arch/x86/kvm/mmu/mmu_internal.h | 9 +++++
arch/x86/kvm/mmu/tdp_mmu.c | 60 ++++++++++++++++++++++++++++++---
3 files changed, 68 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 081df7855065..fa7fabc410c4 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7473,8 +7473,8 @@ bool kvm_arch_pre_set_memory_attributes(struct kvm *kvm,
return kvm_unmap_gfn_range(kvm, range);
}

-static bool hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
- int level)
+bool kvm_hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn,
+ int level)
{
return lpage_info_slot(gfn, slot, level)->disallow_lpage & KVM_LPAGE_MIXED_FLAG;
}
@@ -7501,7 +7501,7 @@ static bool hugepage_has_attrs(struct kvm *kvm, struct kvm_memory_slot *slot,
return kvm_range_has_memory_attributes(kvm, start, end, attrs);

for (gfn = start; gfn < end; gfn += KVM_PAGES_PER_HPAGE(level - 1)) {
- if (hugepage_test_mixed(slot, gfn, level - 1) ||
+ if (kvm_hugepage_test_mixed(slot, gfn, level - 1) ||
attrs != kvm_get_memory_attributes(kvm, gfn))
return false;
}
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 9aa4c6ffa207..315c123affaf 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -430,4 +430,13 @@ void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);

+#ifdef CONFIG_KVM_GENERIC_MEMORY_ATTRIBUTES
+bool kvm_hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level);
+#else
+static inline bool kvm_hugepage_test_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level)
+{
+ return false;
+}
+#endif
+
#endif /* __KVM_X86_MMU_INTERNAL_H */
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index 66de875d3de1..e3682794adda 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -953,6 +953,14 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
return true;
}

+
+static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
+ struct tdp_iter *iter,
+ bool shared);
+
+static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
+ struct kvm_mmu_page *sp, bool shared);
+
/*
* If can_yield is true, will release the MMU lock and reschedule if the
* scheduler needs the CPU or there is contention on the MMU lock. If this
@@ -964,14 +972,16 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
gfn_t start, gfn_t end, bool can_yield, bool flush,
bool zap_private)
{
+ bool is_private = is_private_sp(root);
+ struct kvm_mmu_page *split_sp = NULL;
struct tdp_iter iter;

end = min(end, tdp_mmu_max_gfn_exclusive());

lockdep_assert_held_write(&kvm->mmu_lock);

- WARN_ON_ONCE(zap_private && !is_private_sp(root));
- if (!zap_private && is_private_sp(root))
+ WARN_ON_ONCE(zap_private && !is_private);
+ if (!zap_private && is_private)
return false;

/*
@@ -995,12 +1005,56 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
!is_last_spte(iter.old_spte, iter.level))
continue;

+ if (is_private && kvm_gfn_shared_mask(kvm) &&
+ is_large_pte(iter.old_spte)) {
+ gfn_t gfn = iter.gfn & ~kvm_gfn_shared_mask(kvm);
+ gfn_t mask = KVM_PAGES_PER_HPAGE(iter.level) - 1;
+ struct kvm_memory_slot *slot;
+ struct kvm_mmu_page *sp;
+
+ slot = gfn_to_memslot(kvm, gfn);
+ if (kvm_hugepage_test_mixed(slot, gfn, iter.level) ||
+ (gfn & mask) < start ||
+ end < (gfn & mask) + KVM_PAGES_PER_HPAGE(iter.level)) {
+ WARN_ON_ONCE(!can_yield);
+ if (split_sp) {
+ sp = split_sp;
+ split_sp = NULL;
+ sp->role = tdp_iter_child_role(&iter);
+ } else {
+ WARN_ON(iter.yielded);
+ if (flush && can_yield) {
+ kvm_flush_remote_tlbs(kvm);
+ flush = false;
+ }
+ sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, false);
+ if (iter.yielded) {
+ split_sp = sp;
+ continue;
+ }
+ }
+ KVM_BUG_ON(!sp, kvm);
+
+ tdp_mmu_init_sp(sp, iter.sptep, iter.gfn);
+ if (tdp_mmu_split_huge_page(kvm, &iter, sp, false)) {
+ /* force retry on this gfn. */
+ iter.yielded = true;
+ split_sp = sp;
+ } else
+ flush = true;
+ continue;
+ }
+ }
+
tdp_mmu_iter_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
flush = true;
}

rcu_read_unlock();

+ if (split_sp)
+ tdp_mmu_free_sp(split_sp);
+
/*
* Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
* to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
@@ -1617,8 +1671,6 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
kvm_lockdep_assert_mmu_lock_held(kvm, shared);
KVM_BUG_ON(kvm_mmu_page_role_is_private(role) !=
is_private_sptep(iter->sptep), kvm);
- /* TODO: Large page isn't supported for private SPTE yet. */
- KVM_BUG_ON(kvm_mmu_page_role_is_private(role), kvm);

/*
* Since we are allocating while under the MMU lock we have to be
--
2.25.1