[PATCH 8/9] mm/rmap: introduce folio_try_share_anon_rmap_ptes

From: Dev Jain

Date: Tue Mar 10 2026 - 03:38:28 EST


To enable batched unmapping of anonymous folios, we need to handle the
sharing of exclusive pages. Hence, a batched version of
folio_try_share_anon_rmap_pte() is required.

Currently, the sole purpose of nr_pages in __folio_try_share_anon_rmap is
to do some rmap sanity checks. Add helpers to set and clear the
PageAnonExclusive bit on a batch of nr_pages. Note that
__folio_try_share_anon_rmap can receive nr_pages == HPAGE_PMD_NR from the
PMD path, but currently we only clear the bit on the head page. Retain this
behaviour by setting nr_pages = 1 in case the caller is
folio_try_share_anon_rmap_pmd.

Signed-off-by: Dev Jain <dev.jain@xxxxxxx>
---
include/linux/page-flags.h | 11 +++++++++++
include/linux/rmap.h | 28 ++++++++++++++++++++++++++--
mm/rmap.c | 2 +-
3 files changed, 38 insertions(+), 3 deletions(-)

diff --git a/include/linux/page-flags.h b/include/linux/page-flags.h
index 0e03d816e8b9d..1d74ed9a28c41 100644
--- a/include/linux/page-flags.h
+++ b/include/linux/page-flags.h
@@ -1178,6 +1178,17 @@ static __always_inline void __ClearPageAnonExclusive(struct page *page)
__clear_bit(PG_anon_exclusive, &PF_ANY(page, 1)->flags.f);
}

+static __always_inline void ClearPagesAnonExclusive(struct page *page,
+ unsigned int nr) /* number of consecutive pages; assumes nr >= 1 — nr == 0 would underflow below, TODO confirm no caller passes 0 */
+{
+ for (;;) { /* clear PG_anon_exclusive on each of the nr pages starting at @page */
+ ClearPageAnonExclusive(page);
+ if (--nr == 0)
+ break;
+ ++page;
+ }
+}
+
#ifdef CONFIG_MMU
#define __PG_MLOCKED (1UL << PG_mlocked)
#else
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 1b7720c66ac87..7a67776dca3fe 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -712,9 +712,13 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
__folio_rmap_sanity_checks(folio, page, nr_pages, level);

+ /* We only clear anon-exclusive from head page of PMD folio */
+ if (level == PGTABLE_LEVEL_PMD)
+ nr_pages = 1;
+
/* device private folios cannot get pinned via GUP. */
if (unlikely(folio_is_device_private(folio))) {
- ClearPageAnonExclusive(page);
+ ClearPagesAnonExclusive(page, nr_pages);
return 0;
}

@@ -766,7 +770,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,

if (unlikely(folio_maybe_dma_pinned(folio)))
return -EBUSY;
- ClearPageAnonExclusive(page);
+ ClearPagesAnonExclusive(page, nr_pages);

/*
* This is conceptually a smp_wmb() paired with the smp_rmb() in
@@ -804,6 +808,26 @@ static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
return __folio_try_share_anon_rmap(folio, page, 1, PGTABLE_LEVEL_PTE);
}

+/**
+ * folio_try_share_anon_rmap_ptes - try marking exclusive anonymous pages
+ * mapped by PTEs possibly shared to prepare
+ * for KSM or temporary unmapping
+ * @folio: The folio to share a mapping of
+ * @page: The first mapped exclusive page of the batch
+ * @nr: The number of pages to share (batch size)
+ *
+ * See folio_try_share_anon_rmap_pte() for the full description.
+ *
+ * Context: The caller needs to hold the page table lock and has to have the
+ * page table entries cleared/invalidated. Those PTEs used to map consecutive
+ * pages of the folio passed here. The PTEs are all in the same PMD and VMA.
+ */
+static inline int folio_try_share_anon_rmap_ptes(struct folio *folio,
+ struct page *page, unsigned int nr)
+{
+ return __folio_try_share_anon_rmap(folio, page, nr, PGTABLE_LEVEL_PTE);
+}
+
/**
* folio_try_share_anon_rmap_pmd - try marking an exclusive anonymous page
* range mapped by a PMD possibly shared to
diff --git a/mm/rmap.c b/mm/rmap.c
index 42f6b00cced01..bba5b571946d8 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2300,7 +2300,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,

/* See folio_try_share_anon_rmap(): clear PTE first. */
if (anon_exclusive &&
- folio_try_share_anon_rmap_pte(folio, subpage)) {
+ folio_try_share_anon_rmap_ptes(folio, subpage, 1)) {
folio_put_swap(folio, subpage, 1);
set_pte_at(mm, address, pvmw.pte, pteval);
goto walk_abort;
--
2.34.1