Re: [PATCH v4 1/3] mm/page_alloc: Optimize free_contig_range()
From: David Hildenbrand (Arm)
Date: Mon Mar 30 2026 - 10:42:41 EST
>
> +static void free_prepared_contig_range(struct page *page,
> + unsigned long nr_pages)
Prefer two-tab indent in MM land.
> +{
> + while (nr_pages) {
> + unsigned int order;
> + unsigned long pfn;
> +
> + pfn = page_to_pfn(page);
I'd just do the following above instead:
const unsigned long pfn = page_to_pfn(page);
unsigned long order;
> + /* We are limited by the largest buddy order. */
> + order = pfn ? __ffs(pfn) : MAX_PAGE_ORDER;
> + /* Don't exceed the number of pages to free. */
> + order = min_t(unsigned int, order, ilog2(nr_pages));
> + order = min_t(unsigned int, order, MAX_PAGE_ORDER);
> +
> + /*
> + * Free the chunk as a single block. Our caller has already
> + * called free_pages_prepare() for each order-0 page.
> + */
> + __free_frozen_pages(page, order, FPI_PREPARED);
> +
> + page += 1UL << order;
> + nr_pages -= 1UL << order;
> + }
> +}
> +
> +static void __free_contig_range_common(unsigned long pfn, unsigned long nr_pages,
> + bool is_frozen)
Ditto.
> +{
> + struct page *page = pfn_to_page(pfn);
> + struct page *start = NULL;
> + unsigned long start_sec;
> + bool can_free = true;
> + unsigned long i;
> +
> + /*
> + * Contiguous PFNs might not have a contiguous "struct pages" in some
> + * kernel config. Therefore, check memdesc_section(), and stop batching
> + * once it changes, see num_pages_contiguous().
> + */
> + for (i = 0; i < nr_pages; i++, page++) {
As Vlasta says, the plain page++ needs a second thought: you have to redo
the pfn_to_page() with the next pfn in case the section changes.
> + VM_WARN_ON_ONCE(PageHead(page));
> + VM_WARN_ON_ONCE(PageTail(page));
> +
> + if (!is_frozen)
> + can_free = put_page_testzero(page);
> +
> + if (can_free)
> + can_free = free_pages_prepare(page, 0);
> +
> + if (!can_free) {
> + if (start) {
> + free_prepared_contig_range(start, page - start);
> + start = NULL;
> + }
> + continue;
> + }
> +
> + if (start && memdesc_section(page->flags) != start_sec) {
> + free_prepared_contig_range(start, page - start);
> + start = page;
> + start_sec = memdesc_section(page->flags);
> + } else if (!start) {
> + start = page;
> + start_sec = memdesc_section(page->flags);
> + }
> + }
> +
> + if (start)
> + free_prepared_contig_range(start, page - start);
> +}
> +
> +/**
> + * __free_contig_range - Free contiguous range of order-0 pages.
> + * @pfn: Page frame number of the first page in the range.
> + * @nr_pages: Number of pages to free.
> + *
> + * For each order-0 struct page in the physically contiguous range, put a
> + * reference. Free any page whose reference count falls to zero. The
> + * implementation is functionally equivalent to, but significantly faster than
> + * calling __free_page() for each struct page in a loop.
> + *
> + * Memory allocated with alloc_pages(order>=1) then subsequently split to
> + * order-0 with split_page() is an example of appropriate contiguous pages that
> + * can be freed with this API.
> + *
> + * Context: May be called in interrupt context or while holding a normal
> + * spinlock, but not in NMI context or while holding a raw spinlock.
Interesting that we didn't have a cond_resched() somewhere in there.
--
Cheers,
David