[PATCH RFC 14/19] mm/page_alloc: separate pcplists by freetype flags

From: Brendan Jackman

Date: Wed Feb 25 2026 - 11:42:11 EST


The normal freelists are already separated by the freetype flags, so now
update the pcplists accordingly. This follows the most "obvious" design,
where __GFP_UNMAPPED is supported at arbitrary orders.
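
For example (purely illustrative, assuming the __GFP_UNMAPPED flag
introduced earlier in this series), an order-2 request like

	alloc_pages(GFP_KERNEL | __GFP_UNMAPPED, 2);

is now batched on its own per-cpu list, separate from the one used for
mapped MIGRATE_UNMOVABLE pages of the same order.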

If necessary, it would be possible to avoid the proliferation of
pcplists by restricting the orders that can be allocated from them with
FREETYPE_UNMAPPED.

On the other hand, there's currently no use case for movable/reclaimable
unmapped memory, and constraining the migratetype doesn't have any
tricky plumbing implications. So, take advantage of that and assume that
FREETYPE_UNMAPPED implies MIGRATE_UNMOVABLE.

Overall, this just takes the existing space of pindices and tacks
another bank on the end. For !THP this is just 4 more lists; with THP
there is a single additional list on top of that for hugepages.
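
As a worked example (not part of the patch; assuming the usual
PAGE_ALLOC_COSTLY_ORDER = 3, MIGRATE_PCPTYPES = 3 and NR_PCP_THP = 2
values of a THP-enabled build), the pindex space becomes:

	 0..11  orders 0..3 x {UNMOVABLE, MOVABLE, RECLAIMABLE}
	12..13  HPAGE_PMD_ORDER, non-movable / movable
	14..17  orders 0..3, FREETYPE_UNMAPPED (implicitly UNMOVABLE)
	    18  HPAGE_PMD_ORDER, FREETYPE_UNMAPPED

i.e. NR_PCP_LISTS grows from 14 to 19 (from 12 to 16 without THP).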

Signed-off-by: Brendan Jackman <jackmanb@xxxxxxxxxx>
---
include/linux/mmzone.h | 11 ++++++++++-
mm/page_alloc.c        | 44 +++++++++++++++++++++++++++++++++-----------
2 files changed, 43 insertions(+), 12 deletions(-)

diff --git a/include/linux/mmzone.h b/include/linux/mmzone.h
index 301328cbb8449..fc242b4090441 100644
--- a/include/linux/mmzone.h
+++ b/include/linux/mmzone.h
@@ -692,8 +692,17 @@ enum zone_watermarks {
#else
#define NR_PCP_THP 0
#endif
+/*
+ * FREETYPE_UNMAPPED can currently only be used with MIGRATE_UNMOVABLE, so for
+ * those there's no need to encode the migratetype in the pindex.
+ */
+#ifdef CONFIG_PAGE_ALLOC_UNMAPPED
+#define NR_UNMAPPED_PCP_LISTS (PAGE_ALLOC_COSTLY_ORDER + 1 + !!NR_PCP_THP)
+#else
+#define NR_UNMAPPED_PCP_LISTS 0
+#endif
#define NR_LOWORDER_PCP_LISTS (MIGRATE_PCPTYPES * (PAGE_ALLOC_COSTLY_ORDER + 1))
-#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP)
+#define NR_PCP_LISTS (NR_LOWORDER_PCP_LISTS + NR_PCP_THP + NR_UNMAPPED_PCP_LISTS)

/*
* Flags used in pcp->flags field.
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index fa12fff2182c7..14098474afd07 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -729,18 +729,30 @@ static void bad_page(struct page *page, const char *reason)
add_taint(TAINT_BAD_PAGE, LOCKDEP_NOW_UNRELIABLE);
}

-static inline unsigned int order_to_pindex(int migratetype, int order)
+static inline unsigned int order_to_pindex(freetype_t freetype, int order)
{
+ int migratetype = free_to_migratetype(freetype);
+
+ VM_BUG_ON(migratetype >= MIGRATE_PCPTYPES);
+ VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER &&
+ (!IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) || order != HPAGE_PMD_ORDER));
+
+ /* FREETYPE_UNMAPPED currently always means MIGRATE_UNMOVABLE. */
+ if (freetype_flags(freetype) & FREETYPE_UNMAPPED) {
+ int order_offset = order;
+
+ VM_BUG_ON(migratetype != MIGRATE_UNMOVABLE);
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
+ order_offset = PAGE_ALLOC_COSTLY_ORDER + 1;
+
+ return NR_LOWORDER_PCP_LISTS + NR_PCP_THP + order_offset;
+ }
+
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
bool movable = migratetype == MIGRATE_MOVABLE;

- if (order > PAGE_ALLOC_COSTLY_ORDER) {
- VM_BUG_ON(order != HPAGE_PMD_ORDER);
-
+ if (order > PAGE_ALLOC_COSTLY_ORDER)
return NR_LOWORDER_PCP_LISTS + movable;
- }
- } else {
- VM_BUG_ON(order > PAGE_ALLOC_COSTLY_ORDER);
}

return (MIGRATE_PCPTYPES * order) + migratetype;
@@ -748,8 +760,18 @@ static inline unsigned int order_to_pindex(int migratetype, int order)

static inline int pindex_to_order(unsigned int pindex)
{
- int order = pindex / MIGRATE_PCPTYPES;
+ unsigned int unmapped_base = NR_LOWORDER_PCP_LISTS + NR_PCP_THP;
+ int order;

+ if (pindex >= unmapped_base) {
+ order = pindex - unmapped_base;
+ if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE) &&
+ order > PAGE_ALLOC_COSTLY_ORDER)
+ return HPAGE_PMD_ORDER;
+ return order;
+ }
+
+ order = pindex / MIGRATE_PCPTYPES;
if (IS_ENABLED(CONFIG_TRANSPARENT_HUGEPAGE)) {
if (pindex >= NR_LOWORDER_PCP_LISTS)
order = HPAGE_PMD_ORDER;
@@ -2970,7 +2992,7 @@ static bool free_frozen_page_commit(struct zone *zone,
*/
pcp->alloc_factor >>= 1;
__count_vm_events(PGFREE, 1 << order);
- pindex = order_to_pindex(free_to_migratetype(freetype), order);
+ pindex = order_to_pindex(freetype, order);
list_add(&page->pcp_list, &pcp->lists[pindex]);
pcp->count += 1 << order;

@@ -3490,7 +3512,7 @@ static struct page *rmqueue_pcplist(struct zone *preferred_zone,
* frees.
*/
pcp->free_count >>= 1;
- list = &pcp->lists[order_to_pindex(free_to_migratetype(freetype), order)];
+ list = &pcp->lists[order_to_pindex(freetype, order)];
page = __rmqueue_pcplist(zone, order, freetype, alloc_flags, pcp, list);
pcp_spin_unlock(pcp, UP_flags);
if (page) {
@@ -5275,7 +5297,7 @@ unsigned long alloc_pages_bulk_noprof(gfp_t gfp, int preferred_nid,
goto failed;

/* Attempt the batch allocation */
- pcp_list = &pcp->lists[order_to_pindex(free_to_migratetype(ac.freetype), 0)];
+ pcp_list = &pcp->lists[order_to_pindex(ac.freetype, 0)];
while (nr_populated < nr_pages) {

/* Skip existing pages */

--
2.51.2