[PATCH RFC 11/13] mm/rmap: stop using the entire mapcount for hugetlb folios

From: David Hildenbrand (Arm)

Date: Sun Apr 12 2026 - 15:06:19 EST


There is no real reason why hugetlb still updates the entire mapcount:
the value always corresponds to folio_mapcount().

As we want to change the semantics of the entire mapcount in a way
incompatible with hugetlb, let's just stop using the entire mapcount
for hugetlb folios entirely.

We only have to teach folio_average_page_mapcount() about the change.

Signed-off-by: David Hildenbrand (Arm) <david@xxxxxxxxxx>
---
fs/proc/internal.h | 3 +++
include/linux/mm.h | 2 ++
include/linux/rmap.h | 3 ---
mm/debug.c | 2 +-
mm/rmap.c | 4 +---
5 files changed, 7 insertions(+), 7 deletions(-)

diff --git a/fs/proc/internal.h b/fs/proc/internal.h
index a5908167ce2d..1dd46e55c850 100644
--- a/fs/proc/internal.h
+++ b/fs/proc/internal.h
@@ -186,6 +186,9 @@ static inline int folio_average_page_mapcount(struct folio *folio)
mapcount = folio_large_mapcount(folio);
if (unlikely(mapcount <= 0))
return 0;
+ if (folio_test_hugetlb(folio))
+ return mapcount;
+
entire_mapcount = folio_entire_mapcount(folio);
if (mapcount <= entire_mapcount)
return entire_mapcount;
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 6dd906585420..3092db64a009 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -1829,6 +1829,8 @@ static inline int is_vmalloc_or_module_addr(const void *x)
* How many times the entire folio is mapped as a single unit (eg by a
* PMD or PUD entry). This is probably not what you want, except for
* debugging purposes or implementation of other core folio_*() primitives.
+ *
+ * Always 0 for hugetlb folios.
*/
static inline int folio_entire_mapcount(const struct folio *folio)
{
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 4894e43e5f52..b81b1d9e1eaa 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -443,7 +443,6 @@ static inline int hugetlb_try_dup_anon_rmap(struct folio *folio,
return -EBUSY;
ClearPageAnonExclusive(&folio->page);
}
- atomic_inc(&folio->_entire_mapcount);
atomic_inc(&folio->_large_mapcount);
return 0;
}
@@ -477,7 +476,6 @@ static inline void hugetlb_add_file_rmap(struct folio *folio)
VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
VM_WARN_ON_FOLIO(folio_test_anon(folio), folio);

- atomic_inc(&folio->_entire_mapcount);
atomic_inc(&folio->_large_mapcount);
}

@@ -485,7 +483,6 @@ static inline void hugetlb_remove_rmap(struct folio *folio)
{
VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);

- atomic_dec(&folio->_entire_mapcount);
atomic_dec(&folio->_large_mapcount);
}

diff --git a/mm/debug.c b/mm/debug.c
index 80e050bf29ba..82baaf87ef3d 100644
--- a/mm/debug.c
+++ b/mm/debug.c
@@ -86,7 +86,7 @@ static void __dump_folio(const struct folio *folio, const struct page *page,
pr_warn("head: order:%u mapcount:%d entire_mapcount:%d pincount:%d\n",
folio_order(folio),
folio_mapcount(folio),
- folio_entire_mapcount(folio),
+ folio_entire_mapcount(folio),
pincount);
}

diff --git a/mm/rmap.c b/mm/rmap.c
index 27488183448b..d08927949284 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -3042,11 +3042,10 @@ void hugetlb_add_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
VM_WARN_ON_FOLIO(!folio_test_hugetlb(folio), folio);
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);

- atomic_inc(&folio->_entire_mapcount);
atomic_inc(&folio->_large_mapcount);
if (flags & RMAP_EXCLUSIVE)
SetPageAnonExclusive(&folio->page);
- VM_WARN_ON_FOLIO(folio_entire_mapcount(folio) > 1 &&
+ VM_WARN_ON_FOLIO(folio_large_mapcount(folio) > 1 &&
PageAnonExclusive(&folio->page), folio);
}

@@ -3057,7 +3056,6 @@ void hugetlb_add_new_anon_rmap(struct folio *folio,

BUG_ON(address < vma->vm_start || address >= vma->vm_end);
/* increment count (starts at -1) */
- atomic_set(&folio->_entire_mapcount, 0);
atomic_set(&folio->_large_mapcount, 0);
folio_clear_hugetlb_restore_reserve(folio);
__folio_set_anon(folio, vma, address, true);

--
2.43.0