[PATCH v2 8/9] mm/rmap: Add batched version of folio_try_share_anon_rmap_pte
From: Dev Jain
Date: Fri Apr 10 2026 - 06:39:45 EST
To enable batched unmapping of anonymous folios, we need to handle the
sharing of exclusive pages. Hence, introduce a batched version of
folio_try_share_anon_rmap_pte, named folio_try_share_anon_rmap_ptes.

Currently, the sole purpose of nr_pages in __folio_try_share_anon_rmap is
to perform some rmap sanity checks. Add a helper to clear the
PageAnonExclusive bit on a batch of nr_pages pages. Note that
__folio_try_share_anon_rmap can receive nr_pages == HPAGE_PMD_NR from the
PMD path, but currently we only clear the bit on the head page. Retain
this behaviour by setting nr_pages = 1 when the caller is
folio_try_share_anon_rmap_pmd.

While at it, convert nr_pages to unsigned long to future-proof against
overflow in case P4D-sized huge mappings etc. get supported down the road.
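
For illustration, a minimal sketch of how a batched unmap path could use
the new API; try_share_batch() is a hypothetical wrapper, not part of
this patch, and the locking rules are the ones already documented for
__folio_try_share_anon_rmap:

	/*
	 * Hypothetical example: try to mark a PTE-mapped batch of an
	 * anonymous folio as possibly shared before temporarily
	 * unmapping it. The caller holds the page table lock and has
	 * already cleared/invalidated the page table entries.
	 */
	static bool try_share_batch(struct folio *folio, struct page *page,
				    unsigned long nr_pages)
	{
		/* -EBUSY means the folio may be pinned via GUP; back off. */
		if (folio_try_share_anon_rmap_ptes(folio, page, nr_pages))
			return false;
		return true;
	}
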
Signed-off-by: Dev Jain <dev.jain@xxxxxxx>
---
include/linux/mm.h | 15 +++++++++++++++
include/linux/rmap.h | 25 ++++++++++++++++++-------
2 files changed, 33 insertions(+), 7 deletions(-)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 633bbf9a184a6..2d20954da652a 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -243,6 +243,21 @@ static inline unsigned long folio_page_idx(const struct folio *folio,
return page - &folio->page;
}
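+/*
+ * Clear the PageAnonExclusive flag on @nr_pages consecutive pages of a
+ * folio, starting at @page.
+ */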
+static __always_inline void folio_clear_pages_anon_exclusive(struct page *page,
+ unsigned long nr_pages)
+{
+ for (;;) {
+ ClearPageAnonExclusive(page);
+ if (--nr_pages == 0)
+ break;
+ ++page;
+ }
+}
+
static inline struct folio *lru_to_folio(struct list_head *head)
{
return list_entry((head)->prev, struct folio, lru);
diff --git a/include/linux/rmap.h b/include/linux/rmap.h
index 8dc0871e5f001..f3b3ee3955afc 100644
--- a/include/linux/rmap.h
+++ b/include/linux/rmap.h
@@ -706,15 +706,19 @@ static inline int folio_try_dup_anon_rmap_pmd(struct folio *folio,
}
static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
- struct page *page, int nr_pages, enum pgtable_level level)
+ struct page *page, unsigned long nr_pages, enum pgtable_level level)
{
VM_WARN_ON_FOLIO(!folio_test_anon(folio), folio);
VM_WARN_ON_FOLIO(!PageAnonExclusive(page), folio);
__folio_rmap_sanity_checks(folio, page, nr_pages, level);
+	/* We only clear anon-exclusive on the head page of a PMD-mapped folio. */
+ if (level == PGTABLE_LEVEL_PMD)
+ nr_pages = 1;
+
/* device private folios cannot get pinned via GUP. */
if (unlikely(folio_is_device_private(folio))) {
- ClearPageAnonExclusive(page);
+ folio_clear_pages_anon_exclusive(page, nr_pages);
return 0;
}
@@ -766,7 +770,7 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
if (unlikely(folio_maybe_dma_pinned(folio)))
return -EBUSY;
- ClearPageAnonExclusive(page);
+ folio_clear_pages_anon_exclusive(page, nr_pages);
/*
* This is conceptually a smp_wmb() paired with the smp_rmb() in
@@ -778,11 +782,12 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
}
/**
- * folio_try_share_anon_rmap_pte - try marking an exclusive anonymous page
- * mapped by a PTE possibly shared to prepare
+ * folio_try_share_anon_rmap_ptes - try marking exclusive anonymous pages
+ * mapped by PTEs possibly shared to prepare
* for KSM or temporary unmapping
* @folio: The folio to share a mapping of
- * @page: The mapped exclusive page
+ * @page: The first mapped exclusive page of the batch in the folio
+ * @nr_pages: The number of pages to share in the folio (batch size)
*
* The caller needs to hold the page table lock and has to have the page table
* entries cleared/invalidated.
@@ -797,11 +802,17 @@ static __always_inline int __folio_try_share_anon_rmap(struct folio *folio,
*
* Returns 0 if marking the mapped page possibly shared succeeded. Returns
* -EBUSY otherwise.
*/
+static inline int folio_try_share_anon_rmap_ptes(struct folio *folio,
+ struct page *page, unsigned long nr_pages)
+{
+ return __folio_try_share_anon_rmap(folio, page, nr_pages, PGTABLE_LEVEL_PTE);
+}
+
static inline int folio_try_share_anon_rmap_pte(struct folio *folio,
struct page *page)
{
- return __folio_try_share_anon_rmap(folio, page, 1, PGTABLE_LEVEL_PTE);
+ return folio_try_share_anon_rmap_ptes(folio, page, 1);
}
/**
--
2.34.1