[PATCH] mm/hugetlb: Convert &folio->page to folio_page(folio, 0)

From: nifan.cxl
Date: Tue Apr 08 2025 - 20:50:02 EST


From: Fan Ni <fan.ni@xxxxxxxxxxx>

Convert the use of &folio->page to folio_page(folio, 0) where a struct
folio is already available. This is part of the effort to move some
fields out of struct page to reduce its size.
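
For illustration, a minimal userspace sketch of the access pattern being
changed; the struct definitions and the folio_page() macro below are
simplified stand-ins for the real kernel definitions, not kernel code:

#include <stdio.h>

/* Toy struct page: stands in for the kernel's struct page. */
struct page {
	unsigned long flags;
};

/* Toy struct folio: in the kernel it overlays the head struct page. */
struct folio {
	struct page page;
};

/*
 * Toy stand-in for the kernel's folio_page(folio, n): return the n-th
 * page of the folio. For n == 0 this is the head page, which is what
 * the &folio->page users converted here actually want.
 */
#define folio_page(folio, n)	(&(folio)->page + (n))

int main(void)
{
	struct folio f = { .page = { .flags = 0 } };

	struct page *old_way = &f.page;			/* old: touch the field */
	struct page *new_way = folio_page(&f, 0);	/* new: use the accessor */

	printf("same head page: %s\n", old_way == new_way ? "yes" : "no");
	return 0;
}

Going through the accessor hides how the head page is reached, so later
layout changes to struct page / struct folio only need to update
folio_page() rather than every call site.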

Signed-off-by: Fan Ni <fan.ni@xxxxxxxxxxx>
---
mm/huge_memory.c     | 30 +++++++++++++++++-------------
mm/hugetlb.c         | 20 +++++++++++---------
mm/hugetlb_vmemmap.c | 12 ++++++------
mm/khugepaged.c      | 17 ++++++++++-------
4 files changed, 44 insertions(+), 35 deletions(-)

diff --git a/mm/huge_memory.c b/mm/huge_memory.c
index 28c87e0e036f..4c91fc594b6c 100644
--- a/mm/huge_memory.c
+++ b/mm/huge_memory.c
@@ -1492,7 +1492,7 @@ vm_fault_t vmf_insert_folio_pmd(struct vm_fault *vmf, struct folio *folio,
ptl = pmd_lock(mm, vmf->pmd);
if (pmd_none(*vmf->pmd)) {
folio_get(folio);
- folio_add_file_rmap_pmd(folio, &folio->page, vma);
+ folio_add_file_rmap_pmd(folio, folio_page(folio, 0), vma);
add_mm_counter(mm, mm_counter_file(folio), HPAGE_PMD_NR);
}
error = insert_pfn_pmd(vma, addr, vmf->pmd,
@@ -1620,7 +1620,7 @@ vm_fault_t vmf_insert_folio_pud(struct vm_fault *vmf, struct folio *folio,
*/
if (pud_none(*vmf->pud)) {
folio_get(folio);
- folio_add_file_rmap_pud(folio, &folio->page, vma);
+ folio_add_file_rmap_pud(folio, folio_page(folio, 0), vma);
add_mm_counter(mm, mm_counter_file(folio), HPAGE_PUD_NR);
}
insert_pfn_pud(vma, addr, vmf->pud, pfn_to_pfn_t(folio_pfn(folio)),
@@ -2262,8 +2262,10 @@ int zap_huge_pmd(struct mmu_gather *tlb, struct vm_area_struct *vma,
}

spin_unlock(ptl);
- if (flush_needed)
- tlb_remove_page_size(tlb, &folio->page, HPAGE_PMD_SIZE);
+ if (flush_needed) {
+ tlb_remove_page_size(tlb, folio_page(folio, 0),
+ HPAGE_PMD_SIZE);
+ }
}
return 1;
}
@@ -2630,7 +2632,7 @@ int move_pages_huge_pmd(struct mm_struct *mm, pmd_t *dst_pmd, pmd_t *src_pmd, pm
}
if (src_folio) {
if (folio_maybe_dma_pinned(src_folio) ||
- !PageAnonExclusive(&src_folio->page)) {
+ !PageAnonExclusive(folio_page(src_folio, 0))) {
err = -EBUSY;
goto unlock_ptls;
}
@@ -3316,7 +3318,7 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
* the flags from the original folio.
*/
for (i = new_nr_pages; i < nr_pages; i += new_nr_pages) {
- struct page *new_head = &folio->page + i;
+ struct page *new_head = folio_page(folio, i);

/*
* Careful: new_folio is not a "real" folio before we cleared PageTail.
@@ -3403,7 +3405,7 @@ static void __split_folio_to_order(struct folio *folio, int old_order,
if (new_order)
folio_set_order(folio, new_order);
else
- ClearPageCompound(&folio->page);
+ ClearPageCompound(folio_page(folio, 0));
}

/*
@@ -3528,7 +3530,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
}

folio_split_memcg_refs(folio, old_order, split_order);
- split_page_owner(&folio->page, old_order, split_order);
+ split_page_owner(folio_page(folio, 0), old_order, split_order);
pgalloc_tag_split(folio, old_order, split_order);

__split_folio_to_order(folio, old_order, split_order);
@@ -3640,7 +3642,7 @@ static int __split_unmapped_folio(struct folio *folio, int new_order,
* requires taking the lru_lock so we do the put_page
* of the tail pages after the split is complete.
*/
- free_page_and_swap_cache(&new_folio->page);
+ free_page_and_swap_cache(folio_page(new_folio, 0));
}
return ret;
}
@@ -3971,7 +3973,8 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
{
struct folio *folio = page_folio(page);

- return __folio_split(folio, new_order, &folio->page, page, list, true);
+ return __folio_split(folio, new_order, folio_page(folio, 0), page,
+ list, true);
}

/*
@@ -3999,8 +4002,8 @@ int split_huge_page_to_list_to_order(struct page *page, struct list_head *list,
int folio_split(struct folio *folio, unsigned int new_order,
struct page *split_at, struct list_head *list)
{
- return __folio_split(folio, new_order, split_at, &folio->page, list,
- false);
+ return __folio_split(folio, new_order, split_at, folio_page(folio, 0),
+ list, false);
}

int min_order_for_split(struct folio *folio)
@@ -4024,7 +4027,8 @@ int split_folio_to_list(struct folio *folio, struct list_head *list)
if (ret < 0)
return ret;

- return split_huge_page_to_list_to_order(&folio->page, list, ret);
+ return split_huge_page_to_list_to_order(folio_page(folio, 0), list,
+ ret);
}

/*
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index 73417bea33f5..efbf5013986e 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1306,7 +1306,7 @@ static struct folio *dequeue_hugetlb_folio_node_exact(struct hstate *h,
if (folio_test_hwpoison(folio))
continue;

- if (is_migrate_isolate_page(&folio->page))
+ if (is_migrate_isolate_page(folio_page(folio, 0)))
continue;

list_move(&folio->lru, &h->hugepage_activelist);
@@ -1840,7 +1840,7 @@ void free_huge_folio(struct folio *folio)

hugetlb_set_folio_subpool(folio, NULL);
if (folio_test_anon(folio))
- __ClearPageAnonExclusive(&folio->page);
+ __ClearPageAnonExclusive(folio_page(folio, 0));
folio->mapping = NULL;
restore_reserve = folio_test_hugetlb_restore_reserve(folio);
folio_clear_hugetlb_restore_reserve(folio);
@@ -4050,7 +4050,8 @@ static long demote_free_hugetlb_folios(struct hstate *src, struct hstate *dst,

list_del(&folio->lru);

- split_page_owner(&folio->page, huge_page_order(src), huge_page_order(dst));
+ split_page_owner(folio_page(folio, 0), huge_page_order(src),
+ huge_page_order(dst));
pgalloc_tag_split(folio, huge_page_order(src), huge_page_order(dst));

for (i = 0; i < pages_per_huge_page(src); i += pages_per_huge_page(dst)) {
@@ -6185,9 +6186,9 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
* be leaks between processes, for example, with FOLL_GET users.
*/
if (folio_mapcount(old_folio) == 1 && folio_test_anon(old_folio)) {
- if (!PageAnonExclusive(&old_folio->page)) {
+ if (!PageAnonExclusive(folio_page(old_folio, 0))) {
folio_move_anon_rmap(old_folio, vma);
- SetPageAnonExclusive(&old_folio->page);
+ SetPageAnonExclusive(folio_page(old_folio, 0));
}
if (likely(!unshare))
set_huge_ptep_maybe_writable(vma, vmf->address,
@@ -6197,7 +6198,8 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
return 0;
}
VM_BUG_ON_PAGE(folio_test_anon(old_folio) &&
- PageAnonExclusive(&old_folio->page), &old_folio->page);
+ PageAnonExclusive(folio_page(old_folio, 0)),
+ folio_page(old_folio, 0));

/*
* If the process that created a MAP_PRIVATE mapping is about to
@@ -6249,8 +6251,8 @@ static vm_fault_t hugetlb_wp(struct folio *pagecache_folio,
hugetlb_vma_unlock_read(vma);
mutex_unlock(&hugetlb_fault_mutex_table[hash]);

- unmap_ref_private(mm, vma, &old_folio->page,
- vmf->address);
+ unmap_ref_private(mm, vma, folio_page(old_folio, 0),
+ vmf->address);

mutex_lock(&hugetlb_fault_mutex_table[hash]);
hugetlb_vma_lock_read(vma);
@@ -7832,7 +7834,7 @@ void move_hugetlb_state(struct folio *old_folio, struct folio *new_folio, int re
struct hstate *h = folio_hstate(old_folio);

hugetlb_cgroup_migrate(old_folio, new_folio);
- set_page_owner_migrate_reason(&new_folio->page, reason);
+ set_page_owner_migrate_reason(folio_page(new_folio, 0), reason);

/*
* transfer temporary state of the new hugetlb folio. This is
diff --git a/mm/hugetlb_vmemmap.c b/mm/hugetlb_vmemmap.c
index 9a99dfa3c495..97a7a67515cd 100644
--- a/mm/hugetlb_vmemmap.c
+++ b/mm/hugetlb_vmemmap.c
@@ -454,7 +454,7 @@ static int __hugetlb_vmemmap_restore_folio(const struct hstate *h,
struct folio *folio, unsigned long flags)
{
int ret;
- unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
+ unsigned long vmemmap_start = (unsigned long)folio_page(folio, 0), vmemmap_end;
unsigned long vmemmap_reuse;

VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
@@ -566,7 +566,7 @@ static int __hugetlb_vmemmap_optimize_folio(const struct hstate *h,
unsigned long flags)
{
int ret = 0;
- unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
+ unsigned long vmemmap_start = (unsigned long)folio_page(folio, 0), vmemmap_end;
unsigned long vmemmap_reuse;

VM_WARN_ON_ONCE_FOLIO(!folio_test_hugetlb(folio), folio);
@@ -632,7 +632,7 @@ void hugetlb_vmemmap_optimize_folio(const struct hstate *h, struct folio *folio)

static int hugetlb_vmemmap_split_folio(const struct hstate *h, struct folio *folio)
{
- unsigned long vmemmap_start = (unsigned long)&folio->page, vmemmap_end;
+ unsigned long vmemmap_start = (unsigned long)folio_page(folio, 0), vmemmap_end;
unsigned long vmemmap_reuse;

if (!vmemmap_should_optimize_folio(h, folio))
@@ -668,13 +668,13 @@ static void __hugetlb_vmemmap_optimize_folios(struct hstate *h,
* Already optimized by pre-HVO, just map the
* mirrored tail page structs RO.
*/
- spfn = (unsigned long)&folio->page;
+ spfn = (unsigned long)folio_page(folio, 0);
epfn = spfn + pages_per_huge_page(h);
vmemmap_wrprotect_hvo(spfn, epfn, folio_nid(folio),
HUGETLB_VMEMMAP_RESERVE_SIZE);
register_page_bootmem_memmap(pfn_to_section_nr(spfn),
- &folio->page,
- HUGETLB_VMEMMAP_RESERVE_SIZE);
+ folio_page(folio, 0),
+ HUGETLB_VMEMMAP_RESERVE_SIZE);
static_branch_inc(&hugetlb_optimize_vmemmap_key);
continue;
}
diff --git a/mm/khugepaged.c b/mm/khugepaged.c
index b8838ba8207a..79d18ff5cda5 100644
--- a/mm/khugepaged.c
+++ b/mm/khugepaged.c
@@ -696,14 +696,16 @@ static int __collapse_huge_page_isolate(struct vm_area_struct *vma,
result = SCAN_LACK_REFERENCED_PAGE;
} else {
result = SCAN_SUCCEED;
- trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
- referenced, writable, result);
+ trace_mm_collapse_huge_page_isolate(folio_page(folio, 0),
+ none_or_zero, referenced,
+ writable, result);
return result;
}
out:
release_pte_pages(pte, _pte, compound_pagelist);
- trace_mm_collapse_huge_page_isolate(&folio->page, none_or_zero,
- referenced, writable, result);
+ trace_mm_collapse_huge_page_isolate(folio_page(folio, 0),
+ none_or_zero, referenced,
+ writable, result);
return result;
}

@@ -1435,8 +1437,9 @@ static int hpage_collapse_scan_pmd(struct mm_struct *mm,
*mmap_locked = false;
}
out:
- trace_mm_khugepaged_scan_pmd(mm, &folio->page, writable, referenced,
- none_or_zero, result, unmapped);
+ trace_mm_khugepaged_scan_pmd(mm, folio_page(folio, 0), writable,
+ referenced, none_or_zero, result,
+ unmapped);
return result;
}

@@ -1689,7 +1692,7 @@ int collapse_pte_mapped_thp(struct mm_struct *mm, unsigned long addr,
maybe_install_pmd:
/* step 5: install pmd entry */
result = install_pmd
- ? set_huge_pmd(vma, haddr, pmd, &folio->page)
+ ? set_huge_pmd(vma, haddr, pmd, folio_page(folio, 0))
: SCAN_SUCCEED;
goto drop_folio;
abort:
--
2.47.2