[PATCH v12 58/84] KVM: RISC-V: Use kvm_faultin_pfn() when mapping pfns into the guest

From: Sean Christopherson
Date: Fri Jul 26 2024 - 20:13:16 EST


Convert RISC-V to __kvm_faultin_pfn()+kvm_release_faultin_page(), which
are new APIs to consolidate arch code and provide consistent behavior
across all KVM architectures.
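
For context, a minimal sketch of the resulting pattern (the fault
handler below is illustrative, not taken from this patch; only
kvm_faultin_pfn() and kvm_release_faultin_page() are the series' APIs,
with the signatures used in the diff):

  #include <linux/kvm_host.h>

  /* Hypothetical fault path showing the faultin/release pairing. */
  static int example_map_gfn(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_write)
  {
          struct kvm *kvm = vcpu->kvm;
          struct page *page;
          bool writable;
          kvm_pfn_t pfn;
          int ret;

          /* Pins the backing page (if any) and returns its pfn. */
          pfn = kvm_faultin_pfn(vcpu, gfn, is_write, &writable, &page);
          if (is_error_noslot_pfn(pfn))
                  return -EFAULT;

          spin_lock(&kvm->mmu_lock);
          ret = 0; /* ... install the stage-2 mapping for @pfn ... */
          /*
           * Drop the reference taken by kvm_faultin_pfn() and, if the
           * mapping was installed and is writable, mark the page dirty.
           * The third argument flags the page as "unused", i.e. the
           * mapping was never installed; as in the diff below, -EEXIST
           * (mapping already present) still counts as used.
           */
          kvm_release_faultin_page(kvm, page, ret && ret != -EEXIST, writable);
          spin_unlock(&kvm->mmu_lock);
          return ret;
  }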

Signed-off-by: Sean Christopherson <seanjc@xxxxxxxxxx>
---
arch/riscv/kvm/mmu.c | 11 ++++-------
1 file changed, 4 insertions(+), 7 deletions(-)

diff --git a/arch/riscv/kvm/mmu.c b/arch/riscv/kvm/mmu.c
index 806f68e70642..f73d6a79a78c 100644
--- a/arch/riscv/kvm/mmu.c
+++ b/arch/riscv/kvm/mmu.c
@@ -601,6 +601,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
bool logging = (memslot->dirty_bitmap &&
!(memslot->flags & KVM_MEM_READONLY)) ? true : false;
unsigned long vma_pagesize, mmu_seq;
+ struct page *page;

/* We need minimum second+third level pages */
ret = kvm_mmu_topup_memory_cache(pcache, gstage_pgd_levels);
@@ -631,7 +632,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,

/*
* Read mmu_invalidate_seq so that KVM can detect if the results of
- * vma_lookup() or gfn_to_pfn_prot() become stale priort to acquiring
+ * vma_lookup() or __kvm_faultin_pfn() become stale prior to acquiring
* kvm->mmu_lock.
*
* Rely on mmap_read_unlock() for an implicit smp_rmb(), which pairs
@@ -647,7 +648,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
return -EFAULT;
}

- hfn = gfn_to_pfn_prot(kvm, gfn, is_write, &writable);
+ hfn = kvm_faultin_pfn(vcpu, gfn, is_write, &writable, &page);
if (hfn == KVM_PFN_ERR_HWPOISON) {
send_sig_mceerr(BUS_MCEERR_AR, (void __user *)hva,
vma_pageshift, current);
@@ -681,11 +682,7 @@ int kvm_riscv_gstage_map(struct kvm_vcpu *vcpu,
kvm_err("Failed to map in G-stage\n");

out_unlock:
- if ((!ret || ret == -EEXIST) && writable)
- kvm_set_pfn_dirty(hfn);
- else
- kvm_release_pfn_clean(hfn);
-
+ kvm_release_faultin_page(kvm, page, ret && ret != -EEXIST, writable);
spin_unlock(&kvm->mmu_lock);
return ret;
}
--
2.46.0.rc1.232.g9752f9e123-goog