[PATCH RFC 01/15] mm: move thp_limit_gfp_mask to header
From: Kairui Song via B4 Relay
Date: Thu Feb 19 2026 - 18:43:35 EST
From: Kairui Song <kasong@xxxxxxxxxxx>
No functional change; this moves the helper into the header (renamed to thp_limit_gfp_mask) so it can be reused by later patches in this series.
Signed-off-by: Kairui Song <kasong@xxxxxxxxxxx>
---
include/linux/huge_mm.h | 24 ++++++++++++++++++++++++
mm/shmem.c | 30 +++---------------------------
2 files changed, 27 insertions(+), 27 deletions(-)
diff --git a/include/linux/huge_mm.h b/include/linux/huge_mm.h
index a4d9f964dfde..d522e798822d 100644
--- a/include/linux/huge_mm.h
+++ b/include/linux/huge_mm.h
@@ -237,6 +237,30 @@ static inline bool thp_vma_suitable_order(struct vm_area_struct *vma,
return true;
}
+/*
+ * Make sure huge_gfp is always more limited than limit_gfp.
+ * Some of the flags set permissions, while others set limitations.
+ */
+static inline gfp_t thp_limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
+{
+ gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
+ gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
+ gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
+ gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
+
+ /* Allow allocations only from the originally specified zones. */
+ result |= zoneflags;
+
+ /*
+ * Minimize the result gfp by taking the union with the deny flags,
+ * and the intersection of the allow flags.
+ */
+ result |= (limit_gfp & denyflags);
+ result |= (huge_gfp & limit_gfp) & allowflags;
+
+ return result;
+}
+
/*
* Filter the bitfield of input orders to the ones suitable for use in the vma.
* See thp_vma_suitable_order().
diff --git a/mm/shmem.c b/mm/shmem.c
index b976b40fd442..9f054b5aae8e 100644
--- a/mm/shmem.c
+++ b/mm/shmem.c
@@ -1788,30 +1788,6 @@ static struct folio *shmem_swapin_cluster(swp_entry_t swap, gfp_t gfp,
return folio;
}
-/*
- * Make sure huge_gfp is always more limited than limit_gfp.
- * Some of the flags set permissions, while others set limitations.
- */
-static gfp_t limit_gfp_mask(gfp_t huge_gfp, gfp_t limit_gfp)
-{
- gfp_t allowflags = __GFP_IO | __GFP_FS | __GFP_RECLAIM;
- gfp_t denyflags = __GFP_NOWARN | __GFP_NORETRY;
- gfp_t zoneflags = limit_gfp & GFP_ZONEMASK;
- gfp_t result = huge_gfp & ~(allowflags | GFP_ZONEMASK);
-
- /* Allow allocations only from the originally specified zones. */
- result |= zoneflags;
-
- /*
- * Minimize the result gfp by taking the union with the deny flags,
- * and the intersection of the allow flags.
- */
- result |= (limit_gfp & denyflags);
- result |= (huge_gfp & limit_gfp) & allowflags;
-
- return result;
-}
-
#ifdef CONFIG_TRANSPARENT_HUGEPAGE
bool shmem_hpage_pmd_enabled(void)
{
@@ -2062,7 +2038,7 @@ static struct folio *shmem_swap_alloc_folio(struct inode *inode,
non_swapcache_batch(entry, nr_pages) != nr_pages)
goto fallback;
- alloc_gfp = limit_gfp_mask(vma_thp_gfp_mask(vma), gfp);
+ alloc_gfp = thp_limit_gfp_mask(vma_thp_gfp_mask(vma), gfp);
}
retry:
new = shmem_alloc_folio(alloc_gfp, order, info, index);
@@ -2138,7 +2114,7 @@ static int shmem_replace_folio(struct folio **foliop, gfp_t gfp,
if (nr_pages > 1) {
gfp_t huge_gfp = vma_thp_gfp_mask(vma);
- gfp = limit_gfp_mask(huge_gfp, gfp);
+ gfp = thp_limit_gfp_mask(huge_gfp, gfp);
}
#endif
@@ -2545,7 +2521,7 @@ static int shmem_get_folio_gfp(struct inode *inode, pgoff_t index,
gfp_t huge_gfp;
huge_gfp = vma_thp_gfp_mask(vma);
- huge_gfp = limit_gfp_mask(huge_gfp, gfp);
+ huge_gfp = thp_limit_gfp_mask(huge_gfp, gfp);
folio = shmem_alloc_and_add_folio(vmf, huge_gfp,
inode, index, fault_mm, orders);
if (!IS_ERR(folio)) {
--
2.53.0