[PATCH v2 3/6] mm/sparse-vmemmap: fix DAX vmemmap accounting when optimization is enabled
From: Muchun Song
Date: Wed Apr 15 2026 - 07:20:03 EST

When vmemmap optimization is enabled for DAX, the nr_memmap_pages
counter in /proc/vmstat is wrong. The current code always accounts
for the full, non-optimized vmemmap size, but the optimization backs
the memmap with far fewer pages by reusing tail vmemmap pages, so
vmemmap usage is overcounted and the page statistics in /proc/vmstat
are inaccurate.

Fix this by introducing section_vmemmap_pages(), which returns the
exact number of vmemmap pages backing a given pfn range, depending on
whether the optimization is in effect.
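
For example, with 4K base pages and a 64-byte struct page, a 2M DAX
compound page (vmemmap_shift == 9) covers 512 base pages: its
unoptimized memmap needs DIV_ROUND_UP(512 * 64, 4096) = 8 vmemmap
pages, while the optimized layout keeps only VMEMMAP_RESERVE_NR (2)
of them, so the old accounting overstates usage by 6 pages per
compound page.
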
Fixes: 15995a352474 ("mm: report per-page metadata information")
Signed-off-by: Muchun Song <songmuchun@xxxxxxxxxxxxx>
---
 mm/sparse-vmemmap.c | 45 ++++++++++++++++++++++++++++++++++++++++++++++----
 1 file changed, 41 insertions(+), 4 deletions(-)
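
As a cross-check of the arithmetic (not part of the patch), below is a
minimal userspace model of the new accounting. It hard-codes 4K base
pages, a 64-byte struct page, 128M sections (PFN_SECTION_SHIFT == 15)
and VMEMMAP_RESERVE_NR == 2; the "optimized" flag stands in for
vmemmap_can_optimize():

#include <stdio.h>

#define PAGE_SIZE          4096UL
#define STRUCT_PAGE_SIZE   64UL   /* assumed sizeof(struct page) */
#define PFN_SECTION_SHIFT  15     /* 128M sections with 4K pages */
#define PAGES_PER_SECTION  (1UL << PFN_SECTION_SHIFT)
#define VMEMMAP_RESERVE_NR 2UL    /* head + shared tail vmemmap page */

#define DIV_ROUND_UP(n, d) (((n) + (d) - 1) / (d))

/* Userspace mirror of section_vmemmap_pages(). */
static unsigned long model_vmemmap_pages(unsigned long pfn,
					 unsigned long nr_pages,
					 unsigned int vmemmap_shift,
					 int optimized)
{
	unsigned long pages_per_compound = 1UL << vmemmap_shift;

	/* Unoptimized: the whole memmap is backed by dedicated pages. */
	if (!optimized)
		return DIV_ROUND_UP(nr_pages * STRUCT_PAGE_SIZE, PAGE_SIZE);

	/* Sub-section compound pages each keep VMEMMAP_RESERVE_NR pages. */
	if (vmemmap_shift < PFN_SECTION_SHIFT)
		return VMEMMAP_RESERVE_NR * nr_pages / pages_per_compound;

	/* Section-spanning compound page: only the head section pays. */
	return pfn % pages_per_compound == 0 ? VMEMMAP_RESERVE_NR : 0;
}

int main(void)
{
	/* One 128M section backed by 2M compound pages (shift 9). */
	printf("unoptimized: %lu vmemmap pages\n",
	       model_vmemmap_pages(0, PAGES_PER_SECTION, 9, 0));
	printf("optimized:   %lu vmemmap pages\n",
	       model_vmemmap_pages(0, PAGES_PER_SECTION, 9, 1));
	return 0;
}

For one 128M section of 2M compound pages this prints 512 vmemmap
pages unoptimized vs 128 optimized, the same 4x gap that the counter
currently misreports.
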
diff --git a/mm/sparse-vmemmap.c b/mm/sparse-vmemmap.c
index 40290fbc1db4..05e3e2b94e32 100644
--- a/mm/sparse-vmemmap.c
+++ b/mm/sparse-vmemmap.c
@@ -652,6 +652,42 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
 	}
 }
 
+/*
+ * Return the number of vmemmap pages actually used to back the memmap of
+ * the given pfn range, taking vmemmap optimization into account.
+ */
+static int __meminit section_vmemmap_pages(unsigned long pfn, unsigned long nr_pages,
+					   struct vmem_altmap *altmap,
+					   struct dev_pagemap *pgmap)
+{
+	unsigned int order = pgmap ? pgmap->vmemmap_shift : 0;
+	unsigned long pages_per_compound = 1UL << order;
+
+	VM_WARN_ON_ONCE(!IS_ALIGNED(pfn | nr_pages, min(pages_per_compound,
+							PAGES_PER_SECTION)));
+	VM_WARN_ON_ONCE(pfn_to_section_nr(pfn) != pfn_to_section_nr(pfn + nr_pages - 1));
+
+	/* Unoptimized: the whole memmap is backed by dedicated pages. */
+	if (!vmemmap_can_optimize(altmap, pgmap))
+		return DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE);
+
+	/*
+	 * Compound pages smaller than a section each keep VMEMMAP_RESERVE_NR
+	 * vmemmap pages; their remaining tail vmemmap pages are reused.
+	 */
+	if (order < PFN_SECTION_SHIFT)
+		return VMEMMAP_RESERVE_NR * nr_pages / pages_per_compound;
+
+	/*
+	 * A compound page spanning whole sections only backs vmemmap pages
+	 * in the section that contains its head page.
+	 */
+	if (IS_ALIGNED(pfn, pages_per_compound))
+		return VMEMMAP_RESERVE_NR;
+
+	return 0;
+}
+
 static struct page * __meminit populate_section_memmap(unsigned long pfn,
 		unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
 		struct dev_pagemap *pgmap)
@@ -659,7 +695,7 @@ static struct page * __meminit populate_section_memmap(unsigned long pfn,
 	struct page *page = __populate_section_memmap(pfn, nr_pages, nid, altmap,
 						      pgmap);
 
-	memmap_pages_add(DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE));
+	memmap_pages_add(section_vmemmap_pages(pfn, nr_pages, altmap, pgmap));
 
 	return page;
 }
@@ -670,7 +706,7 @@ static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
 	unsigned long start = (unsigned long) pfn_to_page(pfn);
 	unsigned long end = start + nr_pages * sizeof(struct page);
 
-	memmap_pages_add(-1L * (DIV_ROUND_UP(nr_pages * sizeof(struct page), PAGE_SIZE)));
+	memmap_pages_add(-section_vmemmap_pages(pfn, nr_pages, altmap, pgmap));
 
 	vmemmap_free(start, end, altmap);
 }
@@ -679,9 +715,10 @@ static void free_map_bootmem(struct page *memmap, struct vmem_altmap *altmap,
 {
 	unsigned long start = (unsigned long)memmap;
 	unsigned long end = (unsigned long)(memmap + PAGES_PER_SECTION);
+	unsigned long pfn = page_to_pfn(memmap);
 
-	memmap_boot_pages_add(-1L * (DIV_ROUND_UP(PAGES_PER_SECTION * sizeof(struct page),
-						  PAGE_SIZE)));
+	memmap_boot_pages_add(-section_vmemmap_pages(pfn, PAGES_PER_SECTION,
+						     altmap, pgmap));
 
 	vmemmap_free(start, end, NULL);
 }
--
2.20.1