[PATCH v2 1/2] mm/vmalloc: extract vmalloc_free_pages() helper from vfree()

From: Shivam Kalra via B4 Relay

Date: Wed Mar 04 2026 - 09:56:27 EST


From: Shivam Kalra <shivamkalra98@xxxxxxxxxxx>

Extract the page-freeing loop and NR_VMALLOC stat accounting from
vfree() into a reusable vmalloc_free_pages() helper. The helper operates
on a range [start, end) of pages from a vm_struct, making it suitable
for both full free (vfree) and partial free (upcoming vrealloc shrink).

No functional change.

Signed-off-by: Shivam Kalra <shivamkalra98@xxxxxxxxxxx>
---
mm/vmalloc.c | 42 ++++++++++++++++++++++++++++--------------
1 file changed, 28 insertions(+), 14 deletions(-)

diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index c607307c657a..e2aef0a79f2e 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -3416,6 +3416,32 @@ void vfree_atomic(const void *addr)
schedule_work(&p->wq);
}

+/**
+ * vmalloc_free_pages - free a range of pages from a vmalloc allocation
+ * @vm: the vm_struct containing the pages
+ * @start: first page index to free (inclusive)
+ * @end: one past the last page index to free (exclusive)
+ *
+ * Free pages [start, end), updating NR_VMALLOC stat accounting.
+ * Caller is responsible for unmapping (vunmap_range) and KASAN
+ * poisoning before calling this.
+ */
+static void vmalloc_free_pages(struct vm_struct *vm, unsigned int start,
+ unsigned int end)
+{
+ unsigned int i;
+
+ for (i = start; i < end; i++) {
+ struct page *page = vm->pages[i];
+
+ BUG_ON(!page);
+ if (!(vm->flags & VM_MAP_PUT_PAGES))
+ mod_lruvec_page_state(page, NR_VMALLOC, -1);
+ __free_page(page);
+ cond_resched();
+ }
+}
+
/**
* vfree - Release memory allocated by vmalloc()
* @addr: Memory base address
@@ -3436,7 +3462,6 @@ void vfree_atomic(const void *addr)
void vfree(const void *addr)
{
struct vm_struct *vm;
- int i;

if (unlikely(in_interrupt())) {
vfree_atomic(addr);
@@ -3459,19 +3484,8 @@ void vfree(const void *addr)

if (unlikely(vm->flags & VM_FLUSH_RESET_PERMS))
vm_reset_perms(vm);
- for (i = 0; i < vm->nr_pages; i++) {
- struct page *page = vm->pages[i];
-
- BUG_ON(!page);
- /*
- * High-order allocs for huge vmallocs are split, so
- * can be freed as an array of order-0 allocations
- */
- if (!(vm->flags & VM_MAP_PUT_PAGES))
- mod_lruvec_page_state(page, NR_VMALLOC, -1);
- __free_page(page);
- cond_resched();
- }
+ if (vm->nr_pages)
+ vmalloc_free_pages(vm, 0, vm->nr_pages);
kvfree(vm->pages);
kfree(vm);
}

--
2.43.0