[PATCH 6/9] mm/swapfile: Make folio_dup_swap batchable
From: Dev Jain
Date: Tue Mar 10 2026 - 03:35:58 EST
Teach folio_dup_swap to handle a batch of consecutive pages. Note that
folio_dup_swap can already handle two special cases of this: nr_pages == 1
and nr_pages == folio_nr_pages(folio). Generalize it to any nr_pages.
Currently the interface is awkward: callers pass subpage == NULL when they
mean to operate on the entire folio, and a non-NULL subpage when they want
to operate on only that subpage. Remove this indirection: require callers
to always pass a non-NULL subpage, together with the number of pages to
operate on.
Signed-off-by: Dev Jain <dev.jain@xxxxxxx>
---
mm/rmap.c | 2 +-
mm/shmem.c | 2 +-
mm/swap.h | 5 +++--
mm/swapfile.c | 12 +++++-------
4 files changed, 10 insertions(+), 11 deletions(-)
diff --git a/mm/rmap.c b/mm/rmap.c
index dd638429c963e..f6d5b187cf09b 100644
--- a/mm/rmap.c
+++ b/mm/rmap.c
@@ -2282,7 +2282,7 @@ static bool try_to_unmap_one(struct folio *folio, struct vm_area_struct *vma,
goto discard;
}
- if (folio_dup_swap(folio, subpage) < 0) {
+ if (folio_dup_swap(folio, subpage, 1) < 0) {
set_pte_at(mm, address, pvmw.pte, pteval);
goto walk_abort;
}
diff --git a/mm/shmem.c b/mm/shmem.c
index 5e7dcf5bc5d3c..86ee34c9b40b3 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1695,7 +1695,7 @@ int shmem_writeout(struct folio *folio, struct swap_iocb **plug,
spin_unlock(&shmem_swaplist_lock);
}
- folio_dup_swap(folio, NULL);
+ folio_dup_swap(folio, folio_page(folio, 0), folio_nr_pages(folio));
shmem_delete_from_page_cache(folio, swp_to_radix_entry(folio->swap));
BUG_ON(folio_mapped(folio));
diff --git a/mm/swap.h b/mm/swap.h
index a77016f2423b9..d9cb58ebbddd1 100644
--- a/mm/swap.h
+++ b/mm/swap.h
@@ -206,7 +206,7 @@ extern int swap_retry_table_alloc(swp_entry_t entry, gfp_t gfp);
* folio_put_swap(): does the opposite thing of folio_dup_swap().
*/
int folio_alloc_swap(struct folio *folio);
-int folio_dup_swap(struct folio *folio, struct page *subpage);
+int folio_dup_swap(struct folio *folio, struct page *subpage, unsigned int nr_pages);
void folio_put_swap(struct folio *folio, struct page *subpage);
/* For internal use */
@@ -390,7 +390,8 @@ static inline int folio_alloc_swap(struct folio *folio)
return -EINVAL;
}
-static inline int folio_dup_swap(struct folio *folio, struct page *page)
+static inline int folio_dup_swap(struct folio *folio, struct page *page,
+ unsigned int nr_pages)
{
return -EINVAL;
}
diff --git a/mm/swapfile.c b/mm/swapfile.c
index 915bc93964dbd..eaf61ae6c3817 100644
--- a/mm/swapfile.c
+++ b/mm/swapfile.c
@@ -1738,7 +1738,8 @@ int folio_alloc_swap(struct folio *folio)
/**
* folio_dup_swap() - Increase swap count of swap entries of a folio.
* @folio: folio with swap entries bounded.
- * @subpage: if not NULL, only increase the swap count of this subpage.
+ * @subpage: first subpage whose swap count is to be increased; the swap
+ * counts of @nr_pages consecutive pages starting here are increased.
*
* Typically called when the folio is unmapped and have its swap entry to
* take its place: Swap entries allocated to a folio has count == 0 and pinned
@@ -1752,18 +1753,15 @@ int folio_alloc_swap(struct folio *folio)
* swap_put_entries_direct on its swap entry before this helper returns, or
* the swap count may underflow.
*/
-int folio_dup_swap(struct folio *folio, struct page *subpage)
+int folio_dup_swap(struct folio *folio, struct page *subpage,
+ unsigned int nr_pages)
{
swp_entry_t entry = folio->swap;
- unsigned long nr_pages = folio_nr_pages(folio);
VM_WARN_ON_FOLIO(!folio_test_locked(folio), folio);
VM_WARN_ON_FOLIO(!folio_test_swapcache(folio), folio);
- if (subpage) {
- entry.val += folio_page_idx(folio, subpage);
- nr_pages = 1;
- }
+ entry.val += folio_page_idx(folio, subpage);
return swap_dup_entries_cluster(swap_entry_to_info(entry),
swp_offset(entry), nr_pages);
--
2.34.1