[RFC PATCH v2 10/15] KVM: x86/tdp_mmu: Split the large page when zapping leaf SPTEs

From: isaku.yamahata
Date: Thu Dec 08 2022 - 18:36:37 EST


From: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>

When TDX is enabled, a large page cannot be zapped if it maps a mix of
private and shared pages. In that case, the large page has to be split
first.

Signed-off-by: Xiaoyao Li <xiaoyao.li@xxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c          |  9 +++++
 arch/x86/kvm/mmu/mmu_internal.h |  2 ++
 arch/x86/kvm/mmu/tdp_mmu.c      | 62 +++++++++++++++++++++++++++++++--
 3 files changed, 71 insertions(+), 2 deletions(-)
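
(Not part of the patch, for review convenience: a minimal userspace sketch of
the split-vs-zap decision that the tdp_mmu_zap_leafs() hunk below implements.
The helper name must_split() and the 2MB/4KB geometry are illustrative
assumptions, and the check is phrased against the huge page's base gfn, which
the TDP iterator keeps level-aligned.)

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

typedef uint64_t gfn_t;

/* Assumed geometry: a 2MB huge page maps 512 4KB pages. */
#define PAGES_PER_HPAGE	512ULL

/*
 * Stand-in for the new check in tdp_mmu_zap_leafs(): a large mapping must
 * be split, rather than zapped whole, when its small pages are a
 * private/shared mix or when the zap range [start, end) only partially
 * covers the huge page.
 */
static bool must_split(gfn_t hpage_base, gfn_t start, gfn_t end, bool mixed)
{
	return mixed ||
	       hpage_base < start ||
	       end < hpage_base + PAGES_PER_HPAGE;
}

int main(void)
{
	gfn_t base = 0x200;	/* huge page base, aligned to 512 pages */

	/* Fully covered and uniform: the whole huge page can be zapped. */
	printf("%d\n", must_split(base, 0x000, 0x1000, false));	/* 0 */
	/* Zap range ends inside the huge page: split first. */
	printf("%d\n", must_split(base, 0x000, 0x300, false));		/* 1 */
	/* Private/shared mixed contents: always split. */
	printf("%d\n", must_split(base, 0x000, 0x1000, true));		/* 1 */
	return 0;
}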

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 02adc3c23627..7f56b1dd76fa 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -7318,6 +7318,15 @@ static bool linfo_is_mixed(struct kvm_lpage_info *linfo)
 	return linfo->disallow_lpage & KVM_LPAGE_PRIVATE_SHARED_MIXED;
 }
 
+bool kvm_mem_attr_is_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level)
+{
+	struct kvm_lpage_info *linfo = lpage_info_slot(gfn & KVM_HPAGE_MASK(level),
+						       slot, level);
+
+	WARN_ON_ONCE(level == PG_LEVEL_4K);
+	return linfo_is_mixed(linfo);
+}
+
 static void linfo_set_mixed(gfn_t gfn, struct kvm_memory_slot *slot,
 			    int level, bool mixed)
 {
diff --git a/arch/x86/kvm/mmu/mmu_internal.h b/arch/x86/kvm/mmu/mmu_internal.h
index 641afc4e90cb..2b7c16dfdf5e 100644
--- a/arch/x86/kvm/mmu/mmu_internal.h
+++ b/arch/x86/kvm/mmu/mmu_internal.h
@@ -435,6 +435,8 @@ void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc);
 void track_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 void untrack_possible_nx_huge_page(struct kvm *kvm, struct kvm_mmu_page *sp);
 
+bool kvm_mem_attr_is_mixed(struct kvm_memory_slot *slot, gfn_t gfn, int level);
+
 #ifndef CONFIG_HAVE_KVM_RESTRICTED_MEM
 static inline int kvm_restricted_mem_get_pfn(struct kvm_memory_slot *slot,
 					     gfn_t gfn, kvm_pfn_t *pfn, int *order)
diff --git a/arch/x86/kvm/mmu/tdp_mmu.c b/arch/x86/kvm/mmu/tdp_mmu.c
index cb36089a40da..e9af8c95a3ae 100644
--- a/arch/x86/kvm/mmu/tdp_mmu.c
+++ b/arch/x86/kvm/mmu/tdp_mmu.c
@@ -1102,6 +1102,14 @@ bool kvm_tdp_mmu_zap_sp(struct kvm *kvm, struct kvm_mmu_page *sp)
 	return true;
 }
 
+
+static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,
+						       struct tdp_iter *iter,
+						       bool shared);
+
+static int tdp_mmu_split_huge_page(struct kvm *kvm, struct tdp_iter *iter,
+				   struct kvm_mmu_page *sp, bool shared);
+
 /*
  * If can_yield is true, will release the MMU lock and reschedule if the
  * scheduler needs the CPU or there is contention on the MMU lock. If this
@@ -1113,6 +1121,7 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
 			      gfn_t start, gfn_t end, bool can_yield, bool flush,
 			      bool zap_private)
 {
+	struct kvm_mmu_page *split_sp = NULL;
 	struct tdp_iter iter;
 
 	end = min(end, tdp_mmu_max_gfn_exclusive());
@@ -1144,12 +1153,63 @@ static bool tdp_mmu_zap_leafs(struct kvm *kvm, struct kvm_mmu_page *root,
 		    !is_last_spte(iter.old_spte, iter.level))
 			continue;
 
+		if (kvm_gfn_shared_mask(kvm) && is_large_pte(iter.old_spte)) {
+			gfn_t gfn = iter.gfn & ~kvm_gfn_shared_mask(kvm);
+			gfn_t mask = KVM_PAGES_PER_HPAGE(iter.level) - 1;
+			struct kvm_memory_slot *slot;
+			struct kvm_mmu_page *sp;
+
+			slot = gfn_to_memslot(kvm, gfn);
+			if (kvm_mem_attr_is_mixed(slot, gfn, iter.level) ||
+			    (gfn & mask) < start ||
+			    end < (gfn & mask) + KVM_PAGES_PER_HPAGE(iter.level)) {
+				WARN_ON_ONCE(!can_yield);
+				if (split_sp) {
+					sp = split_sp;
+					split_sp = NULL;
+				} else {
+					WARN_ON(iter.yielded);
+					if (flush) {
+						kvm_flush_remote_tlbs(kvm);
+						flush = false;
+					}
+					sp = tdp_mmu_alloc_sp_for_split(kvm, &iter, false);
+					if (iter.yielded) {
+						split_sp = sp;
+						continue;
+					}
+				}
+				KVM_BUG_ON(!sp, kvm);
+
+				if (tdp_mmu_split_huge_page(kvm, &iter, sp, false)) {
+					kvm_flush_remote_tlbs(kvm);
+					flush = false;
+					/* force retry on this gfn. */
+					iter.yielded = true;
+				} else
+					flush = true;
+				continue;
+			}
+		}
+
 		tdp_mmu_set_spte(kvm, &iter, SHADOW_NONPRESENT_VALUE);
 		flush = true;
 	}
 
 	rcu_read_unlock();
 
+	if (split_sp) {
+		WARN_ON(!can_yield);
+		if (flush) {
+			kvm_flush_remote_tlbs(kvm);
+			flush = false;
+		}
+
+		write_unlock(&kvm->mmu_lock);
+		tdp_mmu_free_sp(split_sp);
+		write_lock(&kvm->mmu_lock);
+	}
+
 	/*
 	 * Because this flow zaps _only_ leaf SPTEs, the caller doesn't need
 	 * to provide RCU protection as no 'struct kvm_mmu_page' will be freed.
@@ -1691,8 +1751,6 @@ static struct kvm_mmu_page *tdp_mmu_alloc_sp_for_split(struct kvm *kvm,

 	KVM_BUG_ON(kvm_mmu_page_role_is_private(role) !=
 		   is_private_sptep(iter->sptep), kvm);
-	/* TODO: Large page isn't supported for private SPTE yet. */
-	KVM_BUG_ON(kvm_mmu_page_role_is_private(role), kvm);
 
 	/*
 	 * Since we are allocating while under the MMU lock we have to be
--
2.25.1