Re: [PATCH RFC 17/19] mm/page_alloc: implement __GFP_UNMAPPED allocations
From: Brendan Jackman
Date: Fri Feb 27 2026 - 05:57:16 EST
More bugs found by AI...
(I am not relaying all of the issues, just the interesting ones/ones in
the most interesting bits of code).
On Wed Feb 25, 2026 at 4:34 PM UTC, Brendan Jackman wrote:
> +/* Try to allocate a page by mapping/unmapping a block from the direct map. */
> +static inline struct page *
> +__rmqueue_direct_map(struct zone *zone, unsigned int request_order,
> + unsigned int alloc_flags, freetype_t freetype)
> +{
> + unsigned int ft_flags_other = freetype_flags(freetype) ^ FREETYPE_UNMAPPED;
> + freetype_t ft_other = migrate_to_freetype(free_to_migratetype(freetype),
> + ft_flags_other);
> + bool want_mapped = !(freetype_flags(freetype) & FREETYPE_UNMAPPED);
> + enum rmqueue_mode rmqm = RMQUEUE_NORMAL;
> + unsigned long irq_flags;
> + int nr_pageblocks;
> + struct page *page;
> + int alloc_order;
> + int err;
> +
> + if (freetype_idx(ft_other) < 0)
> + return NULL;
> +
> + /*
> + * Might need a TLB shootdown. Even if IRQs are on this isn't
> + * safe if the caller holds a lock (in case the other CPUs need that
> + * lock to handle the shootdown IPI).
> + */
> + if (alloc_flags & ALLOC_NOBLOCK)
> + return NULL;
> +
> + if (!can_set_direct_map())
> + return NULL;
> +
> + lockdep_assert(!irqs_disabled() || unlikely(early_boot_irqs_disabled));
> +
> + /*
> + * Need to [un]map a whole pageblock (otherwise it might require
> + * allocating pagetables). First allocate it.
> + */
> + alloc_order = max(request_order, pageblock_order);
> + nr_pageblocks = 1 << (alloc_order - pageblock_order);
> + spin_lock_irqsave(&zone->lock, irq_flags);
> + page = __rmqueue(zone, alloc_order, ft_other, alloc_flags, &rmqm);
> + spin_unlock_irqrestore(&zone->lock, irq_flags);
> + if (!page)
> + return NULL;
> +
> + /*
> + * Now that IRQs are on it's safe to do a TLB shootdown, and now that we
> + * released the zone lock it's possible to allocate a pagetable if
> + * needed to split up a huge page.
> + *
> + * Note that modifying the direct map may need to allocate pagetables.
> + * What about unbounded recursion? Here are the assumptions that make it
> + * safe:
> + *
> + * - The direct map starts out fully mapped at boot. (This is not really
> + * an "assumption" as it's in direct control of page_alloc.c).
> + *
> + * - Once pages in the direct map are broken down, they are not
> + * re-aggregated into larger pages again.
> + *
> + * - Pagetables are never allocated with __GFP_UNMAPPED.
> + *
> + * Under these assumptions, a pagetable might need to be allocated while
> + * _unmapping_ stuff from the direct map during a __GFP_UNMAPPED
> + * allocation. But, the allocation of that pagetable never requires
> + * allocating a further pagetable.
> + */
> + err = set_direct_map_valid_noflush(page,
> + nr_pageblocks << pageblock_order, want_mapped);
> + if (err == -ENOMEM || WARN_ONCE(err, "err=%d\n", err)) {
> + __free_one_page(page, page_to_pfn(page), zone,
> + alloc_order, freetype, FPI_SKIP_REPORT_NOTIFY);
Forgot to take the zone lock — __free_one_page() manipulates the buddy
freelists and must be called with zone->lock held (as the other call
sites here do under spin_lock_irqsave(&zone->lock, ...)).
> + return NULL;
> + }
> +
> + if (!want_mapped) {
> + unsigned long start = (unsigned long)page_address(page);
> + unsigned long end = start + (nr_pageblocks << (pageblock_order + PAGE_SHIFT));
> +
> + flush_tlb_kernel_range(start, end);
> + }
> +
> + for (int i = 0; i < nr_pageblocks; i++) {
> + struct page *block_page = page + (pageblock_nr_pages * i);
> +
> + set_pageblock_freetype_flags(block_page, freetype_flags(freetype));
> + }
> +
> + if (request_order >= alloc_order)
> + return page;
> +
> + /* Free any remaining pages in the block. */
> + spin_lock_irqsave(&zone->lock, irq_flags);
> + for (unsigned int i = request_order; i < alloc_order; i++) {
> + struct page *page_to_free = page + (1 << i);
> +
> + __free_one_page(page_to_free, page_to_pfn(page_to_free), zone,
> + i, freetype, FPI_SKIP_REPORT_NOTIFY);
> + }
> + spin_unlock_irqrestore(&zone->lock, irq_flags);
> +
> + return page;
> +}