[PATCH v1 11/16] mm/memory: inline unmap_page_range() into __zap_vma_range()
From: David Hildenbrand (Arm)
Date: Fri Feb 27 2026 - 15:20:27 EST
Let's inline unmap_page_range() into its single caller, __zap_vma_range(),
to reduce the number of confusing unmap/zap helpers.

While at it, get rid of the unnecessary BUG_ON().

Signed-off-by: David Hildenbrand (Arm) <david@xxxxxxxxxx>
---
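The open-coded walk added below follows the usual pgd_addr_end() pattern:
advance one PGD-sized region per iteration and clamp the final step at the
end of the range. For illustration only, here is a minimal userspace sketch
of that stepping logic; demo_pgd_addr_end(), PGDIR_SIZE_DEMO and the sample
range are made-up names and values, not the kernel's real helpers or
constants:

/*
 * Userspace-only sketch (not kernel code): demonstrates the
 * pgd_addr_end()-style boundary stepping used by the open-coded walk.
 * PGDIR_SIZE_DEMO and the sample range are made up for illustration.
 */
#include <stdio.h>

#define PGDIR_SIZE_DEMO	0x1000UL	/* pretend one PGD covers 4 KiB */
#define PGDIR_MASK_DEMO	(~(PGDIR_SIZE_DEMO - 1))

/* Same shape as the kernel's pgd_addr_end(): next boundary, clamped at end. */
static unsigned long demo_pgd_addr_end(unsigned long addr, unsigned long end)
{
	unsigned long boundary = (addr + PGDIR_SIZE_DEMO) & PGDIR_MASK_DEMO;

	return (boundary - 1 < end - 1) ? boundary : end;
}

int main(void)
{
	unsigned long next, cur = 0x1234, end = 0x4500;

	do {
		next = demo_pgd_addr_end(cur, end);
		printf("would zap [%#lx, %#lx)\n", cur, next);
	} while (cur = next, cur != end);

	return 0;
}

With cur = 0x1234 and end = 0x4500 the sketch prints four sub-ranges, the
first starting at the unaligned 0x1234 and the last clamped at 0x4500.
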
mm/memory.c | 32 ++++++++++++--------------------
1 file changed, 12 insertions(+), 20 deletions(-)
diff --git a/mm/memory.c b/mm/memory.c
index 394b2e931974..1c0bcdfc73b7 100644
--- a/mm/memory.c
+++ b/mm/memory.c
@@ -2056,25 +2056,6 @@ static inline unsigned long zap_p4d_range(struct mmu_gather *tlb,
 	return addr;
 }
 
-static void unmap_page_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
-		unsigned long addr, unsigned long end,
-		struct zap_details *details)
-{
-	pgd_t *pgd;
-	unsigned long next;
-
-	BUG_ON(addr >= end);
-	tlb_start_vma(tlb, vma);
-	pgd = pgd_offset(vma->vm_mm, addr);
-	do {
-		next = pgd_addr_end(addr, end);
-		if (pgd_none_or_clear_bad(pgd))
-			continue;
-		next = zap_p4d_range(tlb, vma, pgd, addr, next, details);
-	} while (pgd++, addr = next, addr != end);
-	tlb_end_vma(tlb, vma);
-}
-
 static void __zap_vma_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 		unsigned long start, unsigned long end,
 		struct zap_details *details)
@@ -2100,7 +2081,18 @@ static void __zap_vma_range(struct mmu_gather *tlb, struct vm_area_struct *vma,
 			return;
 		__unmap_hugepage_range(tlb, vma, start, end, NULL, zap_flags);
 	} else {
-		unmap_page_range(tlb, vma, start, end, details);
+		unsigned long next, cur = start;
+		pgd_t *pgd;
+
+		tlb_start_vma(tlb, vma);
+		pgd = pgd_offset(vma->vm_mm, cur);
+		do {
+			next = pgd_addr_end(cur, end);
+			if (pgd_none_or_clear_bad(pgd))
+				continue;
+			next = zap_p4d_range(tlb, vma, pgd, cur, next, details);
+		} while (pgd++, cur = next, cur != end);
+		tlb_end_vma(tlb, vma);
 	}
 }
 
--
2.43.0