[PATCH v6 3/4] KVM: arm64/mmu: use gfn_to_pfn_noref

From: David Stevens
Date: Thu Mar 30 2023 - 04:58:44 EST


From: David Stevens <stevensd@xxxxxxxxxxxx>

Switch the arm64 MMU to the new gfn_to_pfn_noref functions. This allows
IO and PFNMAP mappings that are backed by valid struct pages but are not
refcounted (e.g. tail pages of non-compound higher-order allocations) to
be mapped into the guest.

Signed-off-by: David Stevens <stevensd@xxxxxxxxxxxx>
---
arch/arm64/kvm/mmu.c | 21 ++++++++++++---------
1 file changed, 12 insertions(+), 9 deletions(-)
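
A minimal sketch of the caller-side pairing the hunks below switch to. The
noref helpers are introduced earlier in this series; the signatures here are
taken from the hunks themselves, so treat this as an illustration of the
pattern rather than the applied change:

	kvm_pfn_t pfn;
	struct page *page;
	bool writable;

	/*
	 * Resolve the gfn to a pfn. If a refcounted struct page backs the
	 * pfn, it is returned via &page; for non-refcounted IO/PFNMAP
	 * mappings, page is presumably left NULL (series semantics, per
	 * the commit message).
	 */
	pfn = __gfn_to_pfn_noref_memslot(memslot, gfn, false, false, NULL,
					 write_fault, &writable, NULL, &page);

	/* ... handle the fault and install the stage-2 mapping ... */

	/*
	 * A single release helper replaces the old kvm_set_pfn_accessed() +
	 * kvm_release_pfn_clean() pair, presumably dropping a reference only
	 * when a struct page was actually returned above.
	 */
	kvm_release_pfn_noref_clean(pfn, page);

The transparent_hugepage_adjust() hunks follow the same idea: the PMD-level
collapse is only attempted when a refcounted page is available (*page is
non-NULL), and the reference is moved to the PMD-aligned page via
kvm_release_page_clean() + get_page() before PMD_SIZE is returned.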

diff --git a/arch/arm64/kvm/mmu.c b/arch/arm64/kvm/mmu.c
index 7113587222ff..0fd726e82a19 100644
--- a/arch/arm64/kvm/mmu.c
+++ b/arch/arm64/kvm/mmu.c
@@ -1082,7 +1082,7 @@ static bool fault_supports_stage2_huge_mapping(struct kvm_memory_slot *memslot,
static unsigned long
transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
unsigned long hva, kvm_pfn_t *pfnp,
- phys_addr_t *ipap)
+ struct page **page, phys_addr_t *ipap)
{
kvm_pfn_t pfn = *pfnp;

@@ -1091,7 +1091,8 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
* sure that the HVA and IPA are sufficiently aligned and that the
* block map is contained within the memslot.
*/
- if (fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
+ if (*page &&
+ fault_supports_stage2_huge_mapping(memslot, hva, PMD_SIZE) &&
get_user_mapping_size(kvm, hva) >= PMD_SIZE) {
/*
* The address we faulted on is backed by a transparent huge
@@ -1112,10 +1113,11 @@ transparent_hugepage_adjust(struct kvm *kvm, struct kvm_memory_slot *memslot,
* page accordingly.
*/
*ipap &= PMD_MASK;
- kvm_release_pfn_clean(pfn);
+ kvm_release_page_clean(*page);
pfn &= ~(PTRS_PER_PMD - 1);
- get_page(pfn_to_page(pfn));
*pfnp = pfn;
+ *page = pfn_to_page(pfn);
+ get_page(*page);

return PMD_SIZE;
}
@@ -1201,6 +1203,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
short vma_shift;
gfn_t gfn;
kvm_pfn_t pfn;
+ struct page *page;
bool logging_active = memslot_is_logging(memslot);
unsigned long fault_level = kvm_vcpu_trap_get_fault_level(vcpu);
unsigned long vma_pagesize, fault_granule;
@@ -1301,8 +1304,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
*/
smp_rmb();

- pfn = __gfn_to_pfn_memslot(memslot, gfn, false, false, NULL,
- write_fault, &writable, NULL);
+ pfn = __gfn_to_pfn_noref_memslot(memslot, gfn, false, false, NULL,
+ write_fault, &writable, NULL, &page);
if (pfn == KVM_PFN_ERR_HWPOISON) {
kvm_send_hwpoison_signal(hva, vma_shift);
return 0;
@@ -1348,7 +1351,8 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
vma_pagesize = fault_granule;
else
vma_pagesize = transparent_hugepage_adjust(kvm, memslot,
- hva, &pfn,
+ hva,
+ &pfn, &page,
&fault_ipa);
}

@@ -1395,8 +1399,7 @@ static int user_mem_abort(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,

out_unlock:
read_unlock(&kvm->mmu_lock);
- kvm_set_pfn_accessed(pfn);
- kvm_release_pfn_clean(pfn);
+ kvm_release_pfn_noref_clean(pfn, page);
return ret != -EAGAIN ? ret : 0;
}

--
2.40.0.348.gf938b09366-goog