Re: [RFC PATCH v4 3/6] mm/migrate: add batch-copy path in migrate_pages_batch

From: Huang, Ying

Date: Tue Mar 24 2026 - 04:50:23 EST


Shivank Garg <shivankg@xxxxxxx> writes:

> Split unmapped folios into batch-eligible (src_batch/dst_batch) and
> standard (src_std/dst_std) lists, gated by the migrate_offload_enabled
> static key, which is off by default. When no offload driver is active,
> the branch is never taken and everything goes through the standard path.
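
An aside on the static key pattern used here, for readers without the
rest of the series at hand -- a minimal sketch, not the patch's exact
code; the enable/disable sites are presumably in the offload driver
patch, and take_batch_path() is just a placeholder:

	#include <linux/jump_label.h>

	DEFINE_STATIC_KEY_FALSE(migrate_offload_enabled);

	/* Hot path: compiled to a NOP branch while the key is disabled. */
	if (static_branch_unlikely(&migrate_offload_enabled))
		take_batch_path();

	/* Control path, e.g. on offload driver (un)registration: */
	static_branch_enable(&migrate_offload_enabled);
	static_branch_disable(&migrate_offload_enabled);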
>
> After TLB flush, batch copy the eligible folios via folios_mc_copy()
> and pass already_copied=true into migrate_folios_move() so
> __migrate_folio() skips the per-folio copy.
>
> On batch copy failure, the already_copied flag stays false and each
> folio falls back to an individual copy.
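
Sketch of the consumer side as I read it, assuming the already_copied
plumbing added earlier in the series (the exact signature may differ):

	/* In __migrate_folio(), paraphrased: */
	if (!already_copied) {
		/* Batch copy failed or folio took the standard path. */
		rc = folio_mc_copy(dst, src);
		if (rc)
			return rc;
	}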
>
> Signed-off-by: Shivank Garg <shivankg@xxxxxxx>
> ---
> mm/migrate.c | 55 +++++++++++++++++++++++++++++++++++++++++-----------
> 1 file changed, 44 insertions(+), 11 deletions(-)
>
> diff --git a/mm/migrate.c b/mm/migrate.c
> index 1d8c1fb627c9..69daa16f9cf3 100644
> --- a/mm/migrate.c
> +++ b/mm/migrate.c
> @@ -43,6 +43,7 @@
> #include <linux/sched/sysctl.h>
> #include <linux/memory-tiers.h>
> #include <linux/pagewalk.h>
> +#include <linux/jump_label.h>
>
> #include <asm/tlbflush.h>
>
> @@ -51,6 +52,8 @@
> #include "internal.h"
> #include "swap.h"
>
> +DEFINE_STATIC_KEY_FALSE(migrate_offload_enabled);
> +
> static const struct movable_operations *offline_movable_ops;
> static const struct movable_operations *zsmalloc_movable_ops;
>
> @@ -1706,6 +1709,12 @@ static int migrate_hugetlbs(struct list_head *from, new_folio_t get_new_folio,
> return nr_failed;
> }
>
> +/* movable_ops folios have their own migrate path */
> +static bool folio_supports_batch_copy(struct folio *folio)
> +{
> + return likely(!page_has_movable_ops(&folio->page));
> +}
> +
> static void migrate_folios_move(struct list_head *src_folios,
> struct list_head *dst_folios,
> free_folio_t put_new_folio, unsigned long private,
> @@ -1805,8 +1814,12 @@ static int migrate_pages_batch(struct list_head *from,
> bool is_large = false;
> struct folio *folio, *folio2, *dst = NULL;
> int rc, rc_saved = 0, nr_pages;
> - LIST_HEAD(unmap_folios);
> - LIST_HEAD(dst_folios);
> + unsigned int nr_batch = 0;
> + bool batch_copied = false;
> + LIST_HEAD(src_batch);
> + LIST_HEAD(dst_batch);
> + LIST_HEAD(src_std);
> + LIST_HEAD(dst_std);

IMHO, the naming appears too copy-centric; how about unmap_batch and
unmap_single? "unmap" is the migration step these lists come from.
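
That is, something like (the dst-side names are just my guess):

	LIST_HEAD(unmap_batch);
	LIST_HEAD(unmap_batch_dst);
	LIST_HEAD(unmap_single);
	LIST_HEAD(unmap_single_dst);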

> bool nosplit = (reason == MR_NUMA_MISPLACED);
>
> VM_WARN_ON_ONCE(mode != MIGRATE_ASYNC &&
> @@ -1943,7 +1956,7 @@ static int migrate_pages_batch(struct list_head *from,

unmap_folios/dst_folios in the comments need to be changed too:

> rc = migrate_folio_unmap(get_new_folio, put_new_folio,
> private, folio, &dst, mode, ret_folios);
> /*
> * The rules are:
> * 0: folio will be put on unmap_folios list,
> * dst folio put on dst_folios list
> * -EAGAIN: stay on the from list
> * -ENOMEM: stay on the from list
> * Other errno: put on ret_folios list
> */
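
e.g. something like (exact wording is just a suggestion):

	 *	0: folio will be put on unmap_batch or unmap_single
	 *	   list, dst folio put on the corresponding dst list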


> /* nr_failed isn't updated for not used */
> stats->nr_thp_failed += thp_retry;
> rc_saved = rc;
> - if (list_empty(&unmap_folios))
> + if (list_empty(&src_batch) && list_empty(&src_std))
> goto out;
> else
> goto move;
> @@ -1953,8 +1966,15 @@ static int migrate_pages_batch(struct list_head *from,
> nr_retry_pages += nr_pages;
> break;
> case 0:
> - list_move_tail(&folio->lru, &unmap_folios);
> - list_add_tail(&dst->lru, &dst_folios);
> + if (static_branch_unlikely(&migrate_offload_enabled) &&
> + folio_supports_batch_copy(folio)) {
> + list_move_tail(&folio->lru, &src_batch);
> + list_add_tail(&dst->lru, &dst_batch);
> + nr_batch++;
> + } else {
> + list_move_tail(&folio->lru, &src_std);
> + list_add_tail(&dst->lru, &dst_std);
> + }
> break;
> default:
> /*
> @@ -1977,17 +1997,28 @@ static int migrate_pages_batch(struct list_head *from,
> /* Flush TLBs for all unmapped folios */
> try_to_unmap_flush();
>
> + /* Batch-copy eligible folios before the move phase */
> + if (!list_empty(&src_batch)) {
> + rc = folios_mc_copy(&dst_batch, &src_batch, nr_batch);
> + batch_copied = (rc == 0);
> + }
> +
> retry = 1;
> for (pass = 0; pass < nr_pass && retry; pass++) {
> retry = 0;
> thp_retry = 0;
> nr_retry_pages = 0;
>
> - /* Move the unmapped folios */
> - migrate_folios_move(&unmap_folios, &dst_folios,
> - put_new_folio, private, mode, reason,
> - ret_folios, stats, &retry, &thp_retry,
> - &nr_failed, &nr_retry_pages, false);
> + if (!list_empty(&src_batch))
> + migrate_folios_move(&src_batch, &dst_batch, put_new_folio,
> + private, mode, reason, ret_folios, stats,
> + &retry, &thp_retry, &nr_failed,
> + &nr_retry_pages, batch_copied);
> + if (!list_empty(&src_std))
> + migrate_folios_move(&src_std, &dst_std, put_new_folio,
> + private, mode, reason, ret_folios, stats,
> + &retry, &thp_retry, &nr_failed,
> + &nr_retry_pages, false);
> }
> nr_failed += retry;
> stats->nr_thp_failed += thp_retry;
> @@ -1996,7 +2027,9 @@ static int migrate_pages_batch(struct list_head *from,
> rc = rc_saved ? : nr_failed;
> out:
> /* Cleanup remaining folios */
> - migrate_folios_undo(&unmap_folios, &dst_folios,
> + migrate_folios_undo(&src_batch, &dst_batch,
> + put_new_folio, private, ret_folios);
> + migrate_folios_undo(&src_std, &dst_std,
> put_new_folio, private, ret_folios);
>
> return rc;

---
Best Regards,
Huang, Ying