[PATCH RFC 07/13] mm/rmap: remove CONFIG_PAGE_MAPCOUNT
From: David Hildenbrand (Arm)
Date: Sun Apr 12 2026 - 15:04:31 EST
With CONFIG_PAGE_MAPCOUNT, page->_mapcount is still updated but
essentially unused. So let's remove CONFIG_PAGE_MAPCOUNT. Given that
CONFIG_NO_PAGE_MAPCOUNT is then the only remaining variant, that
Kconfig option can go as well.

We can replace some instances of "orig_nr_pages" with "nr_pages", as
the latter is no longer modified once the per-page update loops are
gone.
Signed-off-by: David Hildenbrand (Arm) <david@xxxxxxxxxx>
---
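RFC note, not for the commit message: to illustrate why the per-page
loops and their "orig_nr_pages" copies can go, here is a toy
user-space sketch (simplified stand-in types and names, not kernel
code). The removed loops consumed nr_pages as their counter
("while (page++, --nr_pages > 0)"), so the original value had to be
saved up front; with only the per-folio large mapcount left, a single
atomic update covers all pages and nr_pages survives untouched:

	#include <stdatomic.h>
	#include <stdio.h>

	/* toy stand-in for struct folio; the real counter is the
	 * folio's large mapcount */
	struct toy_folio {
		atomic_int large_mapcount;
	};

	static void toy_add_large_mapcount(struct toy_folio *folio,
			int nr_pages)
	{
		/* one update covering all nr_pages, instead of
		 * nr_pages individual page->_mapcount increments */
		atomic_fetch_add(&folio->large_mapcount, nr_pages);
	}

	int main(void)
	{
		struct toy_folio folio = { .large_mapcount = 0 };
		int nr_pages = 512;

		toy_add_large_mapcount(&folio, nr_pages);
		/* nr_pages is still 512; no orig_nr_pages copy needed */
		printf("large mapcount: %d\n",
		       atomic_load(&folio.large_mapcount));
		return 0;
	}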
Documentation/mm/transhuge.rst | 3 ---
include/linux/rmap.h | 11 +----------
mm/Kconfig | 17 -----------------
mm/rmap.c | 36 ++++++------------------------------
4 files changed, 7 insertions(+), 60 deletions(-)
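Also worth spelling out: the removed
"if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))" blocks only did work with
CONFIG_PAGE_MAPCOUNT=y, where (per the changelog) the maintained
counts were essentially unused; under CONFIG_NO_PAGE_MAPCOUNT the
compiler already dropped them, because IS_ENABLED() expands to a
compile-time 0/1 constant. A minimal stand-alone illustration of the
pattern (the TOY_* names are made up for this example):

	#define TOY_CONFIG_PAGE_MAPCOUNT 0	/* the surviving variant */

	static inline void toy_dup_pages(int nr_pages)
	{
		if (TOY_CONFIG_PAGE_MAPCOUNT) {
			/* with the constant above being 0, the compiler
			 * eliminates this whole loop; the patch deletes
			 * the source as well */
			while (nr_pages-- > 0)
				;	/* was: atomic_inc(&page->_mapcount) */
		}
	}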
diff --git a/Documentation/mm/transhuge.rst b/Documentation/mm/transhuge.rst
index f200c1ac19cb..eb5ac076e4c6 100644
--- a/Documentation/mm/transhuge.rst
+++ b/Documentation/mm/transhuge.rst
@@ -129,9 +129,6 @@ pages:
corresponding mapcount), and the current status ("maybe mapped shared" vs.
"mapped exclusively").
- With CONFIG_PAGE_MAPCOUNT, we also increment/decrement
- page->_mapcount.
-
split_huge_page internally has to distribute the refcounts in the head
page to the tail pages before clearing all PG_head/tail bits from the page
structures. It can be done easily for refcounts taken by page table
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index e5569f5fdaec..4894e43e5f52 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -493,8 +493,6 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
struct page *page, int nr_pages, struct vm_area_struct *dst_vma,
enum pgtable_level level)
{
- const int orig_nr_pages = nr_pages;
-
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
switch (level) {
@@ -504,12 +502,7 @@ static __always_inline void __folio_dup_file_rmap(struct folio *folio,
break;
}
- if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
- do {
- atomic_inc(&page->_mapcount);
- } while (page++, --nr_pages > 0);
- }
- folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
+ folio_add_large_mapcount(folio, nr_pages, dst_vma);
break;
case PGTABLE_LEVEL_PMD:
case PGTABLE_LEVEL_PUD:
@@ -608,8 +601,6 @@ static __always_inline int __folio_try_dup_anon_rmap(struct folio *folio,
do {
if (PageAnonExclusive(page))
ClearPageAnonExclusive(page);
- if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
- atomic_inc(&page->_mapcount);
} while (page++, --nr_pages > 0);
folio_add_large_mapcount(folio, orig_nr_pages, dst_vma);
break;
diff --git a/mm/Kconfig b/mm/Kconfig
index bd283958d675..576db4fdf16e 100644
--- a/mm/Kconfig
+++ b/mm/Kconfig
@@ -948,25 +948,8 @@ config READ_ONLY_THP_FOR_FS
support of file THPs will be developed in the next few release
cycles.
-config NO_PAGE_MAPCOUNT
- bool "No per-page mapcount (EXPERIMENTAL)"
- help
- Do not maintain per-page mapcounts for pages part of larger
- allocations, such as transparent huge pages.
-
- When this config option is enabled, some interfaces that relied on
- this information will rely on less-precise per-allocation information
- instead: for example, using the average per-page mapcount in such
- a large allocation instead of the per-page mapcount.
-
- EXPERIMENTAL because the impact of some changes is still unclear.
-
endif # TRANSPARENT_HUGEPAGE
-# simple helper to make the code a bit easier to read
-config PAGE_MAPCOUNT
- def_bool !NO_PAGE_MAPCOUNT
-
#
# The architecture supports pgtable leaves that is larger than PAGE_SIZE
#
diff --git a/mm/rmap.c b/mm/rmap.c
index df42c38fe387..27488183448b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -1354,7 +1354,6 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
enum pgtable_level level)
{
int nr = 0, nr_pmdmapped = 0, mapcount;
- const int orig_nr_pages = nr_pages;
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
@@ -1365,14 +1364,8 @@ static __always_inline void __folio_add_rmap(struct folio *folio,
break;
}
- if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
- do {
- atomic_inc(&page->_mapcount);
- } while (page++, --nr_pages > 0);
- }
-
- mapcount = folio_add_return_large_mapcount(folio, orig_nr_pages, vma);
- if (mapcount == orig_nr_pages)
+ mapcount = folio_add_return_large_mapcount(folio, nr_pages, vma);
+ if (mapcount == nr_pages)
nr = folio_large_nr_pages(folio);
break;
case PGTABLE_LEVEL_PMD:
@@ -1518,15 +1511,6 @@ static __always_inline void __folio_add_anon_rmap(struct folio *folio,
VM_WARN_ON_FOLIO(folio_test_large(folio) &&
folio_entire_mapcount(folio) > 1 &&
PageAnonExclusive(cur_page), folio);
- if (IS_ENABLED(CONFIG_NO_PAGE_MAPCOUNT))
- continue;
-
- /*
- * While PTE-mapping a THP we have a PMD and a PTE
- * mapping.
- */
- VM_WARN_ON_FOLIO(atomic_read(&cur_page->_mapcount) > 0 &&
- PageAnonExclusive(cur_page), folio);
}
/*
@@ -1628,14 +1612,12 @@ void folio_add_new_anon_rmap(struct folio *folio, struct vm_area_struct *vma,
int i;
nr = folio_large_nr_pages(folio);
- for (i = 0; i < nr; i++) {
- struct page *page = folio_page(folio, i);
+ if (exclusive) {
+ for (i = 0; i < nr; i++) {
+ struct page *page = folio_page(folio, i);
- if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT))
- /* increment count (starts at -1) */
- atomic_set(&page->_mapcount, 0);
- if (exclusive)
SetPageAnonExclusive(page);
+ }
}
folio_set_large_mapcount(folio, nr, vma);
@@ -1769,12 +1751,6 @@ static __always_inline void __folio_remove_rmap(struct folio *folio,
if (!mapcount)
nr = folio_large_nr_pages(folio);
- if (IS_ENABLED(CONFIG_PAGE_MAPCOUNT)) {
- do {
- atomic_dec(&page->_mapcount);
- } while (page++, --nr_pages > 0);
- }
-
partially_mapped = __folio_certainly_partially_mapped(folio, mapcount);
break;
case PGTABLE_LEVEL_PMD:
--
2.43.0