[PATCH 1/2] KVM: x86/mmu: Separate "page vec is full" from adding a page to the array

From: Sean Christopherson
Date: Thu Jul 21 2022 - 11:38:35 EST


Move the check for a full "page vector" out of mmu_pages_add(). Returning
true/false (effectively) looks a _lot_ like returning success/fail, which
is very misleading and will become even more misleading when a future patch
clears the unsync child bit upon a page being added to the vector (as
opposed to clearing the bit when the vector is processed by the caller).
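
E.g. the existing call sites in __mmu_unsync_walk() read as if the "add"
itself can fail:

	if (mmu_pages_add(pvec, child, i))	/* reads as "if add failed" */
		return -ENOSPC;

when in reality a non-zero return simply means the vector is now full.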

Checking whether the vector is full when a page is added (as opposed to
checking before the next page needs to be added) is also sub-optimal, e.g.
KVM unnecessarily returns an error if the vector is full but there are no
more unsync pages to process. Separating the check from the "add" will
allow fixing this quirk in a future patch.

No functional change intended.

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
 arch/x86/kvm/mmu/mmu.c | 24 +++++++++++++++++-------
 1 file changed, 17 insertions(+), 7 deletions(-)

diff --git a/arch/x86/kvm/mmu/mmu.c b/arch/x86/kvm/mmu/mmu.c
index 52664c3caaab..ac60a52044ef 100644
--- a/arch/x86/kvm/mmu/mmu.c
+++ b/arch/x86/kvm/mmu/mmu.c
@@ -1741,20 +1741,26 @@ struct kvm_mmu_pages {
 	unsigned int nr;
 };
 
-static int mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
+static bool mmu_is_page_vec_full(struct kvm_mmu_pages *pvec)
+{
+	return (pvec->nr == KVM_PAGE_ARRAY_NR);
+}
+
+static void mmu_pages_add(struct kvm_mmu_pages *pvec, struct kvm_mmu_page *sp,
 			 int idx)
 {
 	int i;
 
-	if (sp->unsync)
-		for (i=0; i < pvec->nr; i++)
+	if (sp->unsync) {
+		for (i = 0; i < pvec->nr; i++) {
 			if (pvec->page[i].sp == sp)
-				return 0;
+				return;
+		}
+	}
 
 	pvec->page[pvec->nr].sp = sp;
 	pvec->page[pvec->nr].idx = idx;
 	pvec->nr++;
-	return (pvec->nr == KVM_PAGE_ARRAY_NR);
 }
 
 static inline void clear_unsync_child_bit(struct kvm_mmu_page *sp, int idx)
@@ -1781,7 +1787,9 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 		child = to_shadow_page(ent & SPTE_BASE_ADDR_MASK);
 
 		if (child->unsync_children) {
-			if (mmu_pages_add(pvec, child, i))
+			mmu_pages_add(pvec, child, i);
+
+			if (mmu_is_page_vec_full(pvec))
 				return -ENOSPC;
 
 			ret = __mmu_unsync_walk(child, pvec);
@@ -1794,7 +1802,9 @@ static int __mmu_unsync_walk(struct kvm_mmu_page *sp,
 			return ret;
 		} else if (child->unsync) {
 			nr_unsync_leaf++;
-			if (mmu_pages_add(pvec, child, i))
+			mmu_pages_add(pvec, child, i);
+
+			if (mmu_is_page_vec_full(pvec))
 				return -ENOSPC;
 		} else
 			clear_unsync_child_bit(sp, i);
--
2.37.1.359.gd136c6c3e2-goog

