Re: [PATCH 05/14] mm/sparse: remove !CONFIG_SPARSEMEM_VMEMMAP leftovers for CONFIG_MEMORY_HOTPLUG

From: Lorenzo Stoakes (Oracle)

Date: Tue Mar 17 2026 - 13:56:16 EST


On Tue, Mar 17, 2026 at 05:56:43PM +0100, David Hildenbrand (Arm) wrote:
> CONFIG_MEMORY_HOTPLUG now depends on CONFIG_SPARSEMEM_VMEMMAP. So
> let's remove the !CONFIG_SPARSEMEM_VMEMMAP leftovers.

(As said on 6/14, which I inexplicably reviewed before this one) - it might be
worth explicitly saying 'dead code' here to underline it.

>
> Signed-off-by: David Hildenbrand (Arm) <david@xxxxxxxxxx>

Sparsemem? More like sparsecode now! Right? RIGHT? Anyway,

Reviewed-by: Lorenzo Stoakes (Oracle) <ljs@xxxxxxxxxx>

> ---
> mm/sparse.c | 61 -----------------------------------------------------
> 1 file changed, 61 deletions(-)
>
> diff --git a/mm/sparse.c b/mm/sparse.c
> index 93252112860e..636a4a0f1199 100644
> --- a/mm/sparse.c
> +++ b/mm/sparse.c
> @@ -657,7 +657,6 @@ void offline_mem_sections(unsigned long start_pfn, unsigned long end_pfn)
> }
> }
>
> -#ifdef CONFIG_SPARSEMEM_VMEMMAP
> static struct page * __meminit populate_section_memmap(unsigned long pfn,
> unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
> struct dev_pagemap *pgmap)
> @@ -729,66 +728,6 @@ static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
>
> return rc;
> }
> -#else
> -static struct page * __meminit populate_section_memmap(unsigned long pfn,
> - unsigned long nr_pages, int nid, struct vmem_altmap *altmap,
> - struct dev_pagemap *pgmap)
> -{
> - return kvmalloc_node(array_size(sizeof(struct page),
> - PAGES_PER_SECTION), GFP_KERNEL, nid);
> -}
> -
> -static void depopulate_section_memmap(unsigned long pfn, unsigned long nr_pages,
> - struct vmem_altmap *altmap)
> -{
> - kvfree(pfn_to_page(pfn));
> -}
> -
> -static void free_map_bootmem(struct page *memmap)
> -{
> - unsigned long maps_section_nr, removing_section_nr, i;
> - unsigned long type, nr_pages;
> - struct page *page = virt_to_page(memmap);
> -
> - nr_pages = PAGE_ALIGN(PAGES_PER_SECTION * sizeof(struct page))
> - >> PAGE_SHIFT;
> -
> - for (i = 0; i < nr_pages; i++, page++) {
> - type = bootmem_type(page);
> -
> - BUG_ON(type == NODE_INFO);
> -
> - maps_section_nr = pfn_to_section_nr(page_to_pfn(page));
> - removing_section_nr = bootmem_info(page);
> -
> - /*
> - * When this function is called, the removing section is
> - * logical offlined state. This means all pages are isolated
> - * from page allocator. If removing section's memmap is placed
> - * on the same section, it must not be freed.
> - * If it is freed, page allocator may allocate it which will
> - * be removed physically soon.
> - */
> - if (maps_section_nr != removing_section_nr)
> - put_page_bootmem(page);
> - }
> -}
> -
> -static int clear_subsection_map(unsigned long pfn, unsigned long nr_pages)
> -{
> - return 0;
> -}
> -
> -static bool is_subsection_map_empty(struct mem_section *ms)
> -{
> - return true;
> -}
> -
> -static int fill_subsection_map(unsigned long pfn, unsigned long nr_pages)
> -{
> - return 0;
> -}
> -#endif /* CONFIG_SPARSEMEM_VMEMMAP */

So this was all dead code again? Ugh.

>
> /*
> * To deactivate a memory region, there are 3 cases to handle across
> --
> 2.43.0
>