Re: [PATCH] mm/memory hotplug/unplug: Optimize zone contiguous check when changing pfn range
From: Mike Rapoport
Date: Mon Mar 23 2026 - 07:59:09 EST
Hi,
On Thu, Mar 19, 2026 at 05:56:22AM -0400, Yuan Liu wrote:
...
> diff --git a/mm/mm_init.c b/mm/mm_init.c
> index df34797691bd..96690e550024 100644
> --- a/mm/mm_init.c
> +++ b/mm/mm_init.c
> @@ -946,6 +946,7 @@ static void __init memmap_init_zone_range(struct zone *zone,
> unsigned long zone_start_pfn = zone->zone_start_pfn;
> unsigned long zone_end_pfn = zone_start_pfn + zone->spanned_pages;
> int nid = zone_to_nid(zone), zone_id = zone_idx(zone);
> + unsigned long zone_hole_start, zone_hole_end;
>
> start_pfn = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
> end_pfn = clamp(end_pfn, zone_start_pfn, zone_end_pfn);
> @@ -957,8 +958,19 @@ static void __init memmap_init_zone_range(struct zone *zone,
> zone_end_pfn, MEMINIT_EARLY, NULL, MIGRATE_MOVABLE,
> false);
>
> - if (*hole_pfn < start_pfn)
> + WRITE_ONCE(zone->pages_with_online_memmap,
> + READ_ONCE(zone->pages_with_online_memmap) +
> + (end_pfn - start_pfn));
> +
> + if (*hole_pfn < start_pfn) {
> init_unavailable_range(*hole_pfn, start_pfn, zone_id, nid);
> + zone_hole_start = clamp(*hole_pfn, zone_start_pfn, zone_end_pfn);
> + zone_hole_end = clamp(start_pfn, zone_start_pfn, zone_end_pfn);
> + if (zone_hole_start < zone_hole_end)
> + WRITE_ONCE(zone->pages_with_online_memmap,
> + READ_ONCE(zone->pages_with_online_memmap) +
> + (zone_hole_end - zone_hole_start));
> + }
I didn't have time to review the whole patch, but one thing really jumped out at me.
memmap_init_zone_range() runs early in boot, before secondary CPUs are
brought up, so there is no concurrent access and no need for the
WRITE_ONCE()/READ_ONCE() pairs here — plain loads and stores will do.
--
Sincerely yours,
Mike.