[PATCH v2 03/11] mm/huge_memory: move THP gfp limit helper into header
From: Kairui Song via B4 Relay
Date: Thu Apr 16 2026 - 14:37:08 EST
From: Kairui Song <kasong@xxxxxxxxxxx>
Shmem has some special requirements for THP GFP flags and has to limit the
allocation to certain zones or fall back more leniently.
We'll use this helper for generic swap THP allocation, which needs to
support shmem. For a typical GFP_HIGHUSER_MOVABLE swap-in, this helper is
basically a no-op, but it's necessary for certain shmem users, mostly
drivers.
No feature change.
Signed-off-by: Kairui Song <kasong@xxxxxxxxxxx>
---
include/linux/huge_mm.h | 30 ++++++++++++++++++++++++++++++
mm/shmem.c | 30 +++---------------------------
2 files changed, 33 insertions(+), 27 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index 2949e5acff35..4c16e5d9756f 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -237,6 +237,31 @@ static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
return true;
}
+/*
+ * Make sure huge_gfp is always more limited than limit_gfp.
+ * Some shmem users want THP allocation to be done less aggressively
+ * and only in certain zones.
+ */
+static inline gfp_t thp_limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
+{
+ gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
+ gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
+ gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
+ gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
+
+ /* Allow allocations only from the originally specified zones. */
+ result |= zoneflags;
+
+ /*
+ * Minimize the result gfp by taking the union with the deny flags,
+ * and the intersection of the allow flags.
+ */
+ result |= (limit_gfp & denyflags);
+ result |= (huge_gfp & limit_gfp) & allowflags;
+
+ return result;
+}
+
/*
* Filter the bitfield of input orders to the ones suitable for use in the vma.
* See thp_vma_suitable_order().
@@ -581,6 +606,11 @@ static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
return false;
}
+static inline gfp_t thp_limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
+{
+ return huge_gfp;
+}
+
static inline unsigned long thp_vma_suitable_orders(struct vm_area_struct *vma,
unsigned long addr, unsigned long orders)
{
diff --git a/mm/shmem.c b/mm/shmem.c
index 5aa43657886c..62473ec6928d 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1788,30 +1788,6 @@ static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
return folio;
}
-/*
- * Make sure huge_gfp is always more limited than limit_gfp.
- * Some of the flags set permissions, while others set limitations.
- */
-static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
-{
- gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
- gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
- gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
- gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
-
- /* Allow allocations only from the originally specified zones. */
- result |= zoneflags;
-
- /*
- * Minimize the result gfp by taking the union with the deny flags,
- * and the intersection of the allow flags.
- */
- result |= (limit_gfp & denyflags);
- result |= (huge_gfp & limit_gfp) & allowflags;
-
- return result;
-}
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool shmem_hpage_pmd_enabled(void)
{
@@ -2062,7 +2038,7 @@ static struct folio *shmem_swap_alloc_folio(struct inode *inode,
non_swapcache_batch(entry, nr_pages) != nr_pages)
goto fallback;
- alloc_gfp = limit_gfp_mask(vma_thp_gfp_mask(vma), gfp);
+ alloc_gfp = thp_limit_gfp_mask(vma_thp_gfp_mask(vma), gfp);
}
retry:
new = shmem_alloc_folio(alloc_gfp, order, info, index);
@@ -2138,7 +2114,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
if (nr_pages > 1) {
gfp_t huge_gfp = vma_thp_gfp_mask(vma);
- gfp = limit_gfp_mask(huge_gfp, gfp);
+ gfp = thp_limit_gfp_mask(huge_gfp, gfp);
}
#endif
@@ -2545,7 +2521,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
gfp_t huge_gfp;
huge_gfp = vma_thp_gfp_mask(vma);
- huge_gfp = limit_gfp_mask(huge_gfp, gfp);
+ huge_gfp = thp_limit_gfp_mask(huge_gfp, gfp);
folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
inode, index, fault_mm, orders);
if (!IS_ERR(folio)) {
--
2.53.0