[RFC PATCH v4 2/6] mm/migrate: skip data copy for already-copied folios
From: Shivank Garg
Date: Mon Mar 09 2026 - 08:22:35 EST
Add a PAGE_ALREADY_COPIED flag to the dst->private migration state.
When the flag is set, __migrate_folio() skips folio_mc_copy() and
performs a metadata-only migration. All callers currently pass
already_copied=false; a later patch in this series enables it for the
batch-copy path.

Move the dst->private state enum earlier in the file so that
__migrate_folio() and move_to_new_folio() can reference
PAGE_ALREADY_COPIED.
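For context, dst->private of an in-flight destination folio already
multiplexes the source's anon_vma pointer with the low PAGE_OLD_STATES
flag bits via __migrate_folio_record()/__migrate_folio_extract(). A
rough sketch of that packing (illustrative, not part of this diff;
it assumes anon_vma allocations are at least 8-byte aligned, as slab
allocations are, so BIT(0)-BIT(2) are free):

	/* record: stash flags in the low bits of the anon_vma pointer */
	dst->private = (void *)anon_vma + old_page_state;

	/* extract: split the pointer and the flag bits back apart */
	private = (unsigned long)dst->private;
	anon_vma = (struct anon_vma *)(private & ~PAGE_OLD_STATES);
	old_page_state = private & PAGE_OLD_STATES;
	dst->private = NULL;

PAGE_ALREADY_COPIED is only ever stored on its own (move_to_new_folio()
writes just the flag, after the anon_vma has already been extracted),
which is why __migrate_folio() can simply clear dst->private after
testing it.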
Signed-off-by: Shivank Garg <shivankg@xxxxxxx>
---
mm/migrate.c | 52 +++++++++++++++++++++++++++++++---------------------
1 file changed, 31 insertions(+), 21 deletions(-)
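A rough sketch of how the later batch-copy patch is expected to drive
already_copied (batch_copy_folio_list() below is a hypothetical
placeholder, not a function added by this series):

	/* Stage 1: copy all folio contents in one batch (e.g. offloaded). */
	rc = batch_copy_folio_list(&unmap_folios, &dst_folios);

	/* Stage 2: metadata-only moves; the data was already copied. */
	if (!rc)
		migrate_folios_move(&unmap_folios, &dst_folios,
				    put_new_folio, private, mode, reason,
				    ret_folios, stats, &retry, &thp_retry,
				    &nr_failed, &nr_retry_pages, true);

Threading the flag down through migrate_folios_move() and
migrate_folio_move() keeps the per-folio move path unchanged for the
existing single-copy callers, which all pass false.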
diff --git a/mm/migrate.c b/mm/migrate.c
index 1bf2cf8c44dd..1d8c1fb627c9 100644
--- a/mm/migrate.c
+++ b/mm/migrate.c
@@ -848,6 +848,18 @@ void folio_migrate_flags(struct folio *newfolio, struct folio *folio)
 }
 EXPORT_SYMBOL(folio_migrate_flags);
 
+/*
+ * To record some information during migration, we use unused private
+ * field of struct folio of the newly allocated destination folio.
+ * This is safe because nobody is using it except us.
+ */
+enum {
+	PAGE_WAS_MAPPED = BIT(0),
+	PAGE_WAS_MLOCKED = BIT(1),
+	PAGE_ALREADY_COPIED = BIT(2),
+	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED | PAGE_ALREADY_COPIED,
+};
+
 /************************************************************
  *                    Migration functions
  ***********************************************************/
@@ -857,14 +869,20 @@ static int __migrate_folio(struct address_space *mapping, struct folio *dst,
 			   enum migrate_mode mode)
 {
 	int rc, expected_count = folio_expected_ref_count(src) + 1;
+	bool already_copied = ((unsigned long)dst->private & PAGE_ALREADY_COPIED);
+
+	if (already_copied)
+		dst->private = NULL;
 
 	/* Check whether src does not have extra refs before we do more work */
 	if (folio_ref_count(src) != expected_count)
 		return -EAGAIN;
 
-	rc = folio_mc_copy(dst, src);
-	if (unlikely(rc))
-		return rc;
+	if (!already_copied) {
+		rc = folio_mc_copy(dst, src);
+		if (unlikely(rc))
+			return rc;
+	}
 
 	rc = __folio_migrate_mapping(mapping, dst, src, expected_count);
 	if (rc)
@@ -1088,7 +1106,7 @@ static int fallback_migrate_folio(struct address_space *mapping,
  *   0 - success
  */
 static int move_to_new_folio(struct folio *dst, struct folio *src,
-			     enum migrate_mode mode)
+			     enum migrate_mode mode, bool already_copied)
 {
 	struct address_space *mapping = folio_mapping(src);
 	int rc = -EAGAIN;
@@ -1096,6 +1114,9 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 	VM_BUG_ON_FOLIO(!folio_test_locked(src), src);
 	VM_BUG_ON_FOLIO(!folio_test_locked(dst), dst);
 
+	if (already_copied)
+		dst->private = (void *)(unsigned long)PAGE_ALREADY_COPIED;
+
 	if (!mapping)
 		rc = migrate_folio(mapping, dst, src, mode);
 	else if (mapping_inaccessible(mapping))
@@ -1127,17 +1148,6 @@ static int move_to_new_folio(struct folio *dst, struct folio *src,
 	return rc;
 }
 
-/*
- * To record some information during migration, we use unused private
- * field of struct folio of the newly allocated destination folio.
- * This is safe because nobody is using it except us.
- */
-enum {
-	PAGE_WAS_MAPPED = BIT(0),
-	PAGE_WAS_MLOCKED = BIT(1),
-	PAGE_OLD_STATES = PAGE_WAS_MAPPED | PAGE_WAS_MLOCKED,
-};
-
 static void __migrate_folio_record(struct folio *dst,
 				   int old_page_state,
 				   struct anon_vma *anon_vma)
@@ -1353,7 +1363,7 @@ static int migrate_folio_unmap(new_folio_t get_new_folio,
 static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 			      struct folio *src, struct folio *dst,
 			      enum migrate_mode mode, enum migrate_reason reason,
-			      struct list_head *ret)
+			      struct list_head *ret, bool already_copied)
 {
 	int rc;
 	int old_page_state = 0;
@@ -1371,7 +1381,7 @@ static int migrate_folio_move(free_folio_t put_new_folio, unsigned long private,
 		goto out_unlock_both;
 	}
 
-	rc = move_to_new_folio(dst, src, mode);
+	rc = move_to_new_folio(dst, src, mode, already_copied);
 	if (rc)
 		goto out;
 
@@ -1519,7 +1529,7 @@ static int unmap_and_move_huge_page(new_folio_t get_new_folio,
 	}
 
 	if (!folio_mapped(src))
-		rc = move_to_new_folio(dst, src, mode);
+		rc = move_to_new_folio(dst, src, mode, false);
 
 	if (page_was_mapped)
 		remove_migration_ptes(src, !rc ? dst : src, ttu);
@@ -1703,7 +1713,7 @@ static void migrate_folios_move(struct list_head *src_folios,
 				struct list_head *ret_folios,
 				struct migrate_pages_stats *stats,
 				int *retry, int *thp_retry, int *nr_failed,
-				int *nr_retry_pages)
+				int *nr_retry_pages, bool already_copied)
 {
 	struct folio *folio, *folio2, *dst, *dst2;
 	bool is_thp;
@@ -1720,7 +1730,7 @@ static void migrate_folios_move(struct list_head *src_folios,
 
 		rc = migrate_folio_move(put_new_folio, private,
 					folio, dst, mode,
-					reason, ret_folios);
+					reason, ret_folios, already_copied);
 		/*
 		 * The rules are:
 		 *	0: folio will be freed
@@ -1977,7 +1987,7 @@ static int migrate_pages_batch(struct list_head *from,
 		migrate_folios_move(&unmap_folios, &dst_folios,
 				    put_new_folio, private, mode, reason,
 				    ret_folios, stats, &retry, &thp_retry,
-				    &nr_failed, &nr_retry_pages, false);
 	}
 	nr_failed += retry;
 	stats->nr_thp_failed += thp_retry;
--
2.43.0