Re: [PATCH v4] sh: Restructure setup code to reserve memory regions earlier

From: John Paul Adrian Glaubitz
Date: Sat Jul 13 2024 - 03:58:42 EST


Hi Oreoluwa,

please find my review below.

On Thu, 2024-07-11 at 14:44 -0700, Oreoluwa Babatunde wrote:
> The unflatten_device_tree() function contains a call to
> memblock_alloc(). This is a problem because this allocation is done
> before any of the reserved memory regions are set aside in
> paging_init().
> As a result, there is a possibility for memblock to unknowingly allocate
> from any of the memory regions that are meant to be reserved.
>
> Hence, restructure the setup code to set aside reserved memory
> regions before any allocations are done using memblock.
>
> Signed-off-by: Oreoluwa Babatunde <quic_obabatun@xxxxxxxxxxx>
> ---
> v4:
> - Rebase patch on top of v6.10-rc1 as requested by the maintainer.
> - Add missing include in arch/sh/kernel/setup.c
>
> v3:
> https://lore.kernel.org/all/20240520175802.2002183-1-quic_obabatun@xxxxxxxxxxx/
> - Instead of moving all of paging_init(), move only the parts
> that are responsible for setting aside the reserved memory
> regions.
>
> v2:
> https://lore.kernel.org/all/20240423233150.74302-1-quic_obabatun@xxxxxxxxxxx/
> - Add Rob Herring's Reviewed-by.
> - cc Andrew Morton to assist with merging this for the sh architecture.
> Similar changes made for loongarch and openrisc in v1 have already
> been merged.
>
> v1:
> https://lore.kernel.org/all/1707524971-146908-4-git-send-email-quic_obabatun@xxxxxxxxxxx/
> arch/sh/include/asm/setup.h | 1 -
> arch/sh/kernel/setup.c | 44 ++++++++++++++++++++++++++++++++++++-
> arch/sh/mm/init.c | 44 -------------------------------------
> 3 files changed, 43 insertions(+), 46 deletions(-)
>
> diff --git a/arch/sh/include/asm/setup.h b/arch/sh/include/asm/setup.h
> index 84bb23a771f3..f8b814fb1c7f 100644
> --- a/arch/sh/include/asm/setup.h
> +++ b/arch/sh/include/asm/setup.h
> @@ -19,7 +19,6 @@
> #define COMMAND_LINE ((char *) (PARAM+0x100))
>
> void sh_mv_setup(void);
> -void check_for_initrd(void);
> void per_cpu_trap_init(void);
> void sh_fdt_init(phys_addr_t dt_phys);
>
> diff --git a/arch/sh/kernel/setup.c b/arch/sh/kernel/setup.c
> index 620e5cf8ae1e..8477491f4ffd 100644
> --- a/arch/sh/kernel/setup.c
> +++ b/arch/sh/kernel/setup.c
> @@ -35,6 +35,7 @@
> #include <asm/io.h>
> #include <asm/page.h>
> #include <asm/elf.h>
> +#include <asm/kexec.h>
> #include <asm/sections.h>
> #include <asm/irq.h>
> #include <asm/setup.h>
> @@ -114,7 +115,7 @@ static int __init early_parse_mem(char *p)
> }
> early_param("mem", early_parse_mem);
>
> -void __init check_for_initrd(void)
> +static void __init check_for_initrd(void)
> {
> #ifdef CONFIG_BLK_DEV_INITRD
> unsigned long start, end;
> @@ -172,6 +173,42 @@ void __init check_for_initrd(void)
> #endif
> }

Making check_for_initrd() static seems like an unrelated change to me, or am
I missing something? If it is indeed unrelated, it should go into a separate
patch.

> +static void __init early_reserve_mem(void)
> +{
> + unsigned long start_pfn;
> + u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
> + u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
> +
> + /*
> + * Partially used pages are not usable - thus
> + * we are rounding upwards:
> + */
> + start_pfn = PFN_UP(__pa(_end));
> +
> + /*
> + * Reserve the kernel text and Reserve the bootmem bitmap. We do
> + * this in two steps (first step was init_bootmem()), because
> + * this catches the (definitely buggy) case of us accidentally
> + * initializing the bootmem allocator with an invalid RAM area.
> + */
> + memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
> +
> + /*
> + * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
> + */
> + if (CONFIG_ZERO_PAGE_OFFSET != 0)
> + memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
> +
> + /*
> + * Handle additional early reservations
> + */
> + check_for_initrd();
> + reserve_crashkernel();
> +
> + if (sh_mv.mv_mem_reserve)
> + sh_mv.mv_mem_reserve();
> +}
> +
> #ifndef CONFIG_GENERIC_CALIBRATE_DELAY
> void calibrate_delay(void)
> {

I'm not really happy with moving early_reserve_mem() from mm/init.c to
kernel/setup.c. Can't we just leave it in mm/init.c and call it from
setup_arch(), while still keeping the changes to paging_init()?
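
Something along these lines is what I have in mind: an untested sketch,
only meant to illustrate the split. The function body stays exactly as it
is today in mm/init.c, and the declaration could live in asm/setup.h, for
example (that placement is just a suggestion):

  /* arch/sh/include/asm/setup.h */
  void early_reserve_mem(void);

  /* arch/sh/mm/init.c: keep the existing body, only drop the "static";
   * the sh_mv.mv_mem_reserve() call can move in here as in your patch */
  void __init early_reserve_mem(void)
  {
  	/* existing body unchanged */
  }

  /* arch/sh/kernel/setup.c, in setup_arch(), right where your patch has it */
  sh_mv.mv_mem_init();

  /* Let earlyprintk output early console messages */
  sh_early_platform_driver_probe("earlyprintk", 1, 1);

  /* set aside reserved memory regions before any memblock allocation */
  early_reserve_mem();

That would keep the reservations ahead of the device tree unflattening just
as your patch does, and it would also sidestep the check_for_initrd()
question above, since its caller would remain in mm/init.c and the
declaration in setup.h would still be needed.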

> @@ -319,9 +356,14 @@ void __init setup_arch(char **cmdline_p)
>
> sh_mv_setup();
>
> + sh_mv.mv_mem_init();
> +
> /* Let earlyprintk output early console messages */
> sh_early_platform_driver_probe("earlyprintk", 1, 1);
>
> + /* set aside reserved memory regions */
> + early_reserve_mem();
> +
> #ifdef CONFIG_OF_EARLY_FLATTREE
> #ifdef CONFIG_USE_BUILTIN_DTB
> unflatten_and_copy_device_tree();
> diff --git a/arch/sh/mm/init.c b/arch/sh/mm/init.c
> index bf1b54055316..4559f5bea782 100644
> --- a/arch/sh/mm/init.c
> +++ b/arch/sh/mm/init.c
> @@ -242,55 +242,11 @@ static void __init do_init_bootmem(void)
> sparse_init();
> }
>
> -static void __init early_reserve_mem(void)
> -{
> - unsigned long start_pfn;
> - u32 zero_base = (u32)__MEMORY_START + (u32)PHYSICAL_OFFSET;
> - u32 start = zero_base + (u32)CONFIG_ZERO_PAGE_OFFSET;
> -
> - /*
> - * Partially used pages are not usable - thus
> - * we are rounding upwards:
> - */
> - start_pfn = PFN_UP(__pa(_end));
> -
> - /*
> - * Reserve the kernel text and Reserve the bootmem bitmap. We do
> - * this in two steps (first step was init_bootmem()), because
> - * this catches the (definitely buggy) case of us accidentally
> - * initializing the bootmem allocator with an invalid RAM area.
> - */
> - memblock_reserve(start, (PFN_PHYS(start_pfn) + PAGE_SIZE - 1) - start);
> -
> - /*
> - * Reserve physical pages below CONFIG_ZERO_PAGE_OFFSET.
> - */
> - if (CONFIG_ZERO_PAGE_OFFSET != 0)
> - memblock_reserve(zero_base, CONFIG_ZERO_PAGE_OFFSET);
> -
> - /*
> - * Handle additional early reservations
> - */
> - check_for_initrd();
> - reserve_crashkernel();
> -}
> -
> void __init paging_init(void)
> {
> unsigned long max_zone_pfns[MAX_NR_ZONES];
> unsigned long vaddr, end;
>
> - sh_mv.mv_mem_init();
> -
> - early_reserve_mem();
> -
> - /*
> - * Once the early reservations are out of the way, give the
> - * platforms a chance to kick out some memory.
> - */
> - if (sh_mv.mv_mem_reserve)
> - sh_mv.mv_mem_reserve();
> -
> memblock_enforce_memory_limit(memory_limit);
> memblock_allow_resize();
>
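
For reference, if I am reading the result correctly, the relevant ordering
in setup_arch() after this patch is (abridged from the hunks above):

  sh_mv_setup();

  sh_mv.mv_mem_init();

  /* Let earlyprintk output early console messages */
  sh_early_platform_driver_probe("earlyprintk", 1, 1);

  /* set aside reserved memory regions */
  early_reserve_mem();

  /* CONFIG_OF_EARLY_FLATTREE: unflatten_and_copy_device_tree() or
     unflatten_device_tree() follows here, with its memblock_alloc() */

so the memblock reservations do happen before the device tree is
unflattened, which addresses the problem described in the commit message
independently of which file early_reserve_mem() ends up in.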

Thanks,
Adrian

--
.''`. John Paul Adrian Glaubitz
: :' : Debian Developer
`. `' Physicist
`- GPG: 62FF 8A75 84E0 2956 9546 0006 7426 3B37 F5B5 F913