[RFC PATCH v1 3/7] mm: hugetlb: Move mpol interpretation out of dequeue_hugetlb_folio_vma()
From: Ackerley Tng
Date: Wed Feb 11 2026 - 19:38:39 EST
Move memory policy interpretation out of dequeue_hugetlb_folio_vma() and
into alloc_hugetlb_folio() to separate reading and interpretation of memory
policy from actual allocation.
Also rename dequeue_hugetlb_folio_vma() to
dequeue_hugetlb_folio_with_mpol() to remove association with vma and to
align with alloc_buddy_hugetlb_folio_with_mpol().
This will later allow memory policy to be interpreted outside of the
process of allocating a hugetlb folio entirely. This opens the door for other
callers of the HugeTLB folio allocation function, such as guest_memfd,
where memory may not always be mapped and hence may not have an associated
vma.
No functional change intended.
Signed-off-by: Ackerley Tng <ackerleytng@xxxxxxxxxx>
---
mm/hugetlb.c | 34 +++++++++++++++-------------------
1 file changed, 15 insertions(+), 19 deletions(-)
diff --git a/mm/hugetlb.c b/mm/hugetlb.c
index aaa23d995b65c..74b5136fdeb54 100644
--- a/mm/hugetlb.c
+++ b/mm/hugetlb.c
@@ -1378,18 +1378,11 @@ static unsigned long available_huge_pages(struct hstate *h)
return h->free_huge_pages - h->resv_huge_pages;
}
-static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
- struct vm_area_struct *vma,
- unsigned long address)
+static struct folio *dequeue_hugetlb_folio_with_mpol(struct hstate *h,
+ struct mempolicy *mpol, int nid, nodemask_t *nodemask)
{
struct folio *folio = NULL;
- struct mempolicy *mpol;
- gfp_t gfp_mask;
- nodemask_t *nodemask;
- int nid;
-
- gfp_mask = htlb_alloc_mask(h);
- nid = huge_node(vma, address, gfp_mask, &mpol, &nodemask);
+ gfp_t gfp_mask = htlb_alloc_mask(h);
if (mpol_is_preferred_many(mpol)) {
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
@@ -1403,7 +1396,6 @@ static struct folio *dequeue_hugetlb_folio_vma(struct hstate *h,
folio = dequeue_hugetlb_folio_nodemask(h, gfp_mask,
nid, nodemask);
- mpol_cond_put(mpol);
return folio;
}
@@ -2889,6 +2881,9 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
int ret, idx;
struct hugetlb_cgroup *h_cg = NULL;
gfp_t gfp = htlb_alloc_mask(h);
+ struct mempolicy *mpol;
+ nodemask_t *nodemask;
+ int nid;
idx = hstate_index(h);
@@ -2949,6 +2944,9 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
spin_lock_irq(&hugetlb_lock);
+ /* Takes reference on mpol. */
+ nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
+
/*
* gbl_chg == 0 indicates a reservation exists for the allocation - so
* try dequeuing a page. If there are available_huge_pages(), try using
@@ -2956,25 +2954,23 @@ struct folio *alloc_hugetlb_folio(struct vm_area_struct *vma,
*/
folio = NULL;
if (!gbl_chg || available_huge_pages(h))
- folio = dequeue_hugetlb_folio_vma(h, vma, addr);
+ folio = dequeue_hugetlb_folio_with_mpol(h, mpol, nid, nodemask);
if (!folio) {
- struct mempolicy *mpol;
- nodemask_t *nodemask;
- int nid;
-
spin_unlock_irq(&hugetlb_lock);
- nid = huge_node(vma, addr, gfp, &mpol, &nodemask);
folio = alloc_buddy_hugetlb_folio_with_mpol(h, mpol, nid, nodemask);
- mpol_cond_put(mpol);
- if (!folio)
+ if (!folio) {
+ mpol_cond_put(mpol);
goto out_uncharge_cgroup;
+ }
spin_lock_irq(&hugetlb_lock);
list_add(&folio->lru, &h->hugepage_activelist);
folio_ref_unfreeze(folio, 1);
/* Fall through */
}
+ mpol_cond_put(mpol);
+
/*
* Either dequeued or buddy-allocated folio needs to add special
* mark to the folio when it consumes a global reservation.
--
2.53.0.310.g728cabbaf7-goog