[v7 06/16] mm/migrate_device: handle partially mapped folios during collection

From: Balbir Singh
Date: Wed Oct 01 2025 - 02:59:48 EST


Extend migrate_vma_collect_pmd() to handle partially mapped large folios
that require splitting before migration can proceed.

During PTE walk in the collection phase, if a large folio is only
partially mapped in the migration range, it must be split to ensure the
folio is correctly migrated.

Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: David Hildenbrand <david@xxxxxxxxxx>
Cc: Zi Yan <ziy@xxxxxxxxxx>
Cc: Joshua Hahn <joshua.hahnjy@xxxxxxxxx>
Cc: Rakie Kim <rakie.kim@xxxxxx>
Cc: Byungchul Park <byungchul@xxxxxx>
Cc: Gregory Price <gourry@xxxxxxxxxx>
Cc: Ying Huang <ying.huang@xxxxxxxxxxxxxxxxx>
Cc: Alistair Popple <apopple@xxxxxxxxxx>
Cc: Oscar Salvador <osalvador@xxxxxxx>
Cc: Lorenzo Stoakes <lorenzo.stoakes@xxxxxxxxxx>
Cc: Baolin Wang <baolin.wang@xxxxxxxxxxxxxxxxx>
Cc: "Liam R. Howlett" <Liam.Howlett@xxxxxxxxxx>
Cc: Nico Pache <npache@xxxxxxxxxx>
Cc: Ryan Roberts <ryan.roberts@xxxxxxx>
Cc: Dev Jain <dev.jain@xxxxxxx>
Cc: Barry Song <baohua@xxxxxxxxxx>
Cc: Lyude Paul <lyude@xxxxxxxxxx>
Cc: Danilo Krummrich <dakr@xxxxxxxxxx>
Cc: David Airlie <airlied@xxxxxxxxx>
Cc: Simona Vetter <simona@xxxxxxxx>
Cc: Ralph Campbell <rcampbell@xxxxxxxxxx>
Cc: Mika Penttilä <mpenttil@xxxxxxxxxx>
Cc: Matthew Brost <matthew.brost@xxxxxxxxx>
Cc: Francois Dugast <francois.dugast@xxxxxxxxx>
Signed-off-by: Balbir Singh <balbirs@xxxxxxxxxx>
---
mm/migrate_device.c | 63 +++++++++++++++++++++++++++++++++++++++++++++
1 file changed, 63 insertions(+)

diff --git a/mm/migrate_device.c b/mm/migrate_device.c
index abd9f6850db6..1c70d937ba44 100644
--- a/mm/migrate_device.c
+++ b/mm/migrate_device.c
@@ -54,6 +54,53 @@ static int migrate_vma_collect_hole(unsigned long start,
return 0;
}

+/**
+ * migrate_vma_split_folio() - split a large folio prior to migration
+ * @folio: the large folio to split
+ * @fault_page: struct page associated with the fault, if any (may be NULL)
+ *
+ * Return: 0 on success, or the error from split_folio() on failure.
+ */
+static int migrate_vma_split_folio(struct folio *folio,
+ struct page *fault_page)
+{
+ int ret;
+ struct folio *fault_folio = fault_page ? page_folio(fault_page) : NULL;
+ struct folio *new_fault_folio = NULL;
+
+ if (folio != fault_folio) {
+ folio_get(folio);
+ folio_lock(folio);
+ }
+
+ ret = split_folio(folio);
+ if (ret) {
+ if (folio != fault_folio) {
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+ return ret;
+ }
+
+ new_fault_folio = fault_page ? page_folio(fault_page) : NULL;
+
+ /*
+ * Keep the lock on whichever folio contains fault_page
+ * after the split; drop the ref/lock taken above otherwise.
+ */
+ if (!new_fault_folio) {
+ folio_unlock(folio);
+ folio_put(folio);
+ } else if (folio != new_fault_folio) {
+ folio_get(new_fault_folio);
+ folio_lock(new_fault_folio);
+ folio_unlock(folio);
+ folio_put(folio);
+ }
+
+ return 0;
+}
+
static int migrate_vma_collect_pmd(pmd_t *pmdp,
unsigned long start,
unsigned long end,
@@ -171,6 +218,22 @@ static int migrate_vma_collect_pmd(pmd_t *pmdp,
pgmap->owner != migrate->pgmap_owner)
goto next;
}
+ folio = page ? page_folio(page) : NULL;
+ if (folio && folio_test_large(folio)) {
+ int ret;
+
+ pte_unmap_unlock(ptep, ptl);
+ ret = migrate_vma_split_folio(folio,
+ migrate->fault_page);
+
+ if (ret) {
+ ptep = pte_offset_map_lock(mm, pmdp, addr, &ptl);
+ goto next;
+ }
+
+ addr = start;
+ goto again;
+ }
mpfn = migrate_pfn(pfn) | MIGRATE_PFN_MIGRATE;
mpfn |= pte_write(pte) ? MIGRATE_PFN_WRITE : 0;
}
--
2.51.0