[PATCH v3 08/11] powerpc/kvm/book3s_hv: Apply counting method to monitor lockless pgtbl walks
From: Leonardo Bras
Date: Tue Sep 24 2019 - 17:26:15 EST
Apply the counting-based method to monitor lockless pagetable walks in
all book3s_hv related functions: start_lockless_pgtbl_walk() is taken
before each __find_linux_pte() call, and end_lockless_pgtbl_walk() is
issued after the last use of the pte it returns, including the
early-return path in kvmppc_get_hpa().
Signed-off-by: Leonardo Bras <leonardo@xxxxxxxxxxxxx>
---
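Reviewer note, not part of the commit message: the start/end helpers
used below are introduced earlier in this series. As a rough sketch of
the counting idea only (the field name lockless_pgtbl_walk_count and
the barrier choices are illustrative assumptions here, not the series'
actual implementation), the pair could look like:

#include <linux/mm_types.h>
#include <linux/atomic.h>

static inline void start_lockless_pgtbl_walk(struct mm_struct *mm)
{
	/* Record that a lockless pagetable walk is in flight. */
	atomic_inc(&mm->lockless_pgtbl_walk_count);	/* hypothetical field */
	/* Order the increment before the walker's pagetable reads. */
	smp_mb__after_atomic();
}

static inline void end_lockless_pgtbl_walk(struct mm_struct *mm)
{
	/* Order the walker's last pagetable access before the decrement. */
	smp_mb__before_atomic();
	atomic_dec(&mm->lockless_pgtbl_walk_count);	/* hypothetical field */
}

With a counter like this, code about to split or collapse a pagetable
can observe in-flight lockless walkers instead of relying only on IRQ
tricks. Note that kvmppc_get_hpa() below must also call
end_lockless_pgtbl_walk() on its early-return path so the counter
never leaks an increment.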
arch/powerpc/kvm/book3s_hv_nested.c | 8 ++++++++
arch/powerpc/kvm/book3s_hv_rm_mmu.c | 9 ++++++++-
2 files changed, 16 insertions(+), 1 deletion(-)
diff --git a/arch/powerpc/kvm/book3s_hv_nested.c b/arch/powerpc/kvm/book3s_hv_nested.c
index 735e0ac6f5b2..ed68e57af3a3 100644
--- a/arch/powerpc/kvm/book3s_hv_nested.c
+++ b/arch/powerpc/kvm/book3s_hv_nested.c
@@ -804,6 +804,7 @@ static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
 		return;
 
 	/* Find the pte */
+	start_lockless_pgtbl_walk(kvm->mm);
 	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
 	/*
 	 * If the pte is present and the pfn is still the same, update the pte.
@@ -815,6 +816,7 @@ static void kvmhv_update_nest_rmap_rc(struct kvm *kvm, u64 n_rmap,
 		__radix_pte_update(ptep, clr, set);
 		kvmppc_radix_tlbie_page(kvm, gpa, shift, lpid);
 	}
+	end_lockless_pgtbl_walk(kvm->mm);
 }
 
 /*
@@ -854,10 +856,12 @@ static void kvmhv_remove_nest_rmap(struct kvm *kvm, u64 n_rmap,
 		return;
 
 	/* Find and invalidate the pte */
+	start_lockless_pgtbl_walk(kvm->mm);
 	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
 	/* Don't spuriously invalidate ptes if the pfn has changed */
 	if (ptep && pte_present(*ptep) && ((pte_val(*ptep) & mask) == hpa))
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
+	end_lockless_pgtbl_walk(kvm->mm);
 }
 
 static void kvmhv_remove_nest_rmap_list(struct kvm *kvm, unsigned long *rmapp,
@@ -921,6 +925,7 @@ static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
 	int shift;
 
 	spin_lock(&kvm->mmu_lock);
+	start_lockless_pgtbl_walk(kvm->mm);
 	ptep = __find_linux_pte(gp->shadow_pgtable, gpa, NULL, &shift);
 	if (!shift)
 		shift = PAGE_SHIFT;
@@ -928,6 +933,7 @@ static bool kvmhv_invalidate_shadow_pte(struct kvm_vcpu *vcpu,
 		kvmppc_unmap_pte(kvm, ptep, gpa, shift, NULL, gp->shadow_lpid);
 		ret = true;
 	}
+	end_lockless_pgtbl_walk(kvm->mm);
 	spin_unlock(&kvm->mmu_lock);
 
 	if (shift_ret)
@@ -1362,11 +1368,13 @@ static long int __kvmhv_nested_page_fault(struct kvm_run *run,
 	/* See if can find translation in our partition scoped tables for L1 */
 	pte = __pte(0);
 	spin_lock(&kvm->mmu_lock);
+	start_lockless_pgtbl_walk(kvm->mm);
 	pte_p = __find_linux_pte(kvm->arch.pgtable, gpa, NULL, &shift);
 	if (!shift)
 		shift = PAGE_SHIFT;
 	if (pte_p)
 		pte = *pte_p;
+	end_lockless_pgtbl_walk(kvm->mm);
 	spin_unlock(&kvm->mmu_lock);
 
 	if (!pte_present(pte) || (writing && !(pte_val(pte) & _PAGE_WRITE))) {
diff --git a/arch/powerpc/kvm/book3s_hv_rm_mmu.c b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
index 63e0ce91e29d..53ca67492211 100644
--- a/arch/powerpc/kvm/book3s_hv_rm_mmu.c
+++ b/arch/powerpc/kvm/book3s_hv_rm_mmu.c
@@ -258,6 +258,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 	 * If called in real mode we have MSR_EE = 0. Otherwise
 	 * we disable irq above.
 	 */
+	start_lockless_pgtbl_walk(kvm->mm);
 	ptep = __find_linux_pte(pgdir, hva, NULL, &hpage_shift);
 	if (ptep) {
 		pte_t pte;
@@ -311,6 +312,7 @@ long kvmppc_do_h_enter(struct kvm *kvm, unsigned long flags,
 		ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
 		ptel |= HPTE_R_M;
 	}
+	end_lockless_pgtbl_walk(kvm->mm);
 
 	/* Find and lock the HPTEG slot to use */
  do_insert:
@@ -886,10 +888,15 @@ static int kvmppc_get_hpa(struct kvm_vcpu *vcpu, unsigned long gpa,
 	hva = __gfn_to_hva_memslot(memslot, gfn);
 
 	/* Try to find the host pte for that virtual address */
+	start_lockless_pgtbl_walk(kvm->mm);
 	ptep = __find_linux_pte(vcpu->arch.pgdir, hva, NULL, &shift);
-	if (!ptep)
+	if (!ptep) {
+		end_lockless_pgtbl_walk(kvm->mm);
 		return H_TOO_HARD;
+	}
 	pte = kvmppc_read_update_linux_pte(ptep, writing);
+	end_lockless_pgtbl_walk(kvm->mm);
+
 	if (!pte_present(pte))
 		return H_TOO_HARD;
--
2.20.1