Every time kvm_mmu_get_page() is called with a non-NULL parent_pte
argument, a call to link_shadow_page() follows to set the parent entry
so that the new mapping points to the returned page table.

Moving the parent_pte handling into link_shadow_page() allows us to
clean up the code, because parent_pte is passed to kvm_mmu_get_page()
only for mark_unsync() and mmu_page_add_parent_pte().
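
As an illustration (a sketch, not a quote from the diff below), a call
site such as the one in __direct_map() only needs to start passing vcpu
through to link_shadow_page(); the surrounding lines are paraphrased:

	sp = kvm_mmu_get_page(vcpu, pseudo_gfn, iterator.addr,
			      iterator.level - 1, 1, ACC_ALL,
			      iterator.sptep);
	/* parent_pte bookkeeping now happens inside link_shadow_page() */
	link_shadow_page(vcpu, iterator.sptep, sp, true);
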
Signed-off-by: Takuya Yoshikawa <yoshikawa_takuya_b1@xxxxxxxxxxxxx>
---
 arch/x86/kvm/mmu.c         | 21 ++++++++-------------
 arch/x86/kvm/paging_tmpl.h |  6 ++----
2 files changed, 10 insertions(+), 17 deletions(-)
diff --git a/arch/x86/kvm/mmu.c b/arch/x86/kvm/mmu.c
index 9273cd4..33fe720 100644
--- a/arch/x86/kvm/mmu.c
+++ b/arch/x86/kvm/mmu.c
@@ -2108,14 +2108,9 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
if (sp->unsync_children) {
kvm_make_request(KVM_REQ_MMU_SYNC, vcpu);
kvm_mmu_mark_parents_unsync(sp);
- if (parent_pte)
- mark_unsync(parent_pte);
} else if (sp->unsync) {
kvm_mmu_mark_parents_unsync(sp);
- if (parent_pte)
- mark_unsync(parent_pte);
}
- mmu_page_add_parent_pte(vcpu, sp, parent_pte);
__clear_sp_write_flooding_count(sp);
trace_kvm_mmu_get_page(sp, false);
@@ -2127,7 +2122,6 @@ static struct kvm_mmu_page *kvm_mmu_get_page(struct kvm_vcpu *vcpu,
sp = kvm_mmu_alloc_page(vcpu, direct);
sp->parent_ptes.val = 0;
- mmu_page_add_parent_pte(vcpu, sp, parent_pte);
sp->gfn = gfn;
sp->role = role;
@@ -2196,7 +2190,8 @@ static void shadow_walk_next(struct kvm_shadow_walk_iterator *iterator)
return __shadow_walk_next(iterator, *iterator->sptep);
}
-static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed)
+static void link_shadow_page(struct kvm_vcpu *vcpu, u64 *sptep,
+ struct kvm_mmu_page *sp, bool accessed)
{
u64 spte;
@@ -2210,6 +2205,11 @@ static void link_shadow_page(u64 *sptep, struct kvm_mmu_page *sp, bool accessed)
spte |= shadow_accessed_mask;
mmu_spte_set(sptep, spte);
+
+ if (sp->unsync_children || sp->unsync)
+ mark_unsync(sptep);