Re: [PATCH v7 07/10] RISC-V: Move relocate and few other functions out of __init

From: Anup Patel
Date: Mon Jan 27 2020 - 23:38:41 EST


On Tue, Jan 28, 2020 at 7:58 AM Atish Patra <atish.patra@xxxxxxx> wrote:
>
> The secondary hart booting and relocation code are in the .init
> section. As a result, they are freed once kernel booting is done.
> However, the ordered booting protocol and CPU hotplug always require
> this code to be present in order to bring up harts after the initial
> kernel boot.
>
> Move the required code into a separate section and make sure that it
> is placed within the first 2MB of memory, as the trampoline page
> directory only maps the first 2MB.
>
> Signed-off-by: Atish Patra <atish.patra@xxxxxxx>
> ---
> arch/riscv/kernel/head.S | 73 +++++++++++++++++++--------------
> arch/riscv/kernel/vmlinux.lds.S | 9 +++-
> 2 files changed, 50 insertions(+), 32 deletions(-)
>
> diff --git a/arch/riscv/kernel/head.S b/arch/riscv/kernel/head.S
> index a4242be66966..9d7f084a50cc 100644
> --- a/arch/riscv/kernel/head.S
> +++ b/arch/riscv/kernel/head.S
> @@ -14,7 +14,7 @@
> #include <asm/hwcap.h>
> #include <asm/image.h>
>
> -__INIT
> +__HEAD
> ENTRY(_start)
> /*
> * Image header expected by Linux boot-loaders. The image header data
> @@ -44,9 +44,10 @@ ENTRY(_start)
> .balign 4
> .ascii RISCV_IMAGE_MAGIC2
> .word 0
> +END(_start)
>
> -.global _start_kernel
> -_start_kernel:
> + __INIT
> +ENTRY(_start_kernel)
> /* Mask all interrupts */
> csrw CSR_IE, zero
> csrw CSR_IP, zero
> @@ -125,6 +126,37 @@ clear_bss_done:
> call parse_dtb
> tail start_kernel
>
> +.Lsecondary_start:
> +#ifdef CONFIG_SMP
> + /* Set trap vector to spin forever to help debug */
> + la a3, .Lsecondary_park
> + csrw CSR_TVEC, a3
> +
> + slli a3, a0, LGREG
> + la a1, __cpu_up_stack_pointer
> + la a2, __cpu_up_task_pointer
> + add a1, a3, a1
> + add a2, a3, a2
> +
> + /*
> + * This hart didn't win the lottery, so we wait for the winning hart to
> + * get far enough along the boot process that it should continue.
> + */
> +.Lwait_for_cpu_up:
> + /* FIXME: We should WFI to save some energy here. */
> + REG_L sp, (a1)
> + REG_L tp, (a2)
> + beqz sp, .Lwait_for_cpu_up
> + beqz tp, .Lwait_for_cpu_up
> + fence
> +
> + tail secondary_start_common
> +#endif
> +
> +END(_start_kernel)
> +
> +.section ".noinit.text","ax",@progbits
> +.align 2

Try to use __HEAD here (if possible).
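
For reference, __HEAD is just shorthand for switching to the
".head.text" section (see include/linux/init.h), so these two lines
could become something like:

	__HEAD		/* i.e. .section ".head.text","ax" */
	.align 2

That keeps relocate and the hart bring-up code resident next to _start
without introducing a new section name.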

> #ifdef CONFIG_MMU
> relocate:
> /* Relocate return address */
> @@ -177,41 +209,27 @@ relocate:
>
> ret
> #endif /* CONFIG_MMU */
> -
> -.Lsecondary_start:
> #ifdef CONFIG_SMP
> /* Set trap vector to spin forever to help debug */
> la a3, .Lsecondary_park
> csrw CSR_TVEC, a3
>
> slli a3, a0, LGREG
> - la a1, __cpu_up_stack_pointer
> - la a2, __cpu_up_task_pointer
> - add a1, a3, a1
> - add a2, a3, a2
> -
> - /*
> - * This hart didn't win the lottery, so we wait for the winning hart to
> - * get far enough along the boot process that it should continue.
> - */
> -.Lwait_for_cpu_up:
> - /* FIXME: We should WFI to save some energy here. */
> - REG_L sp, (a1)
> - REG_L tp, (a2)
> - beqz sp, .Lwait_for_cpu_up
> - beqz tp, .Lwait_for_cpu_up
> - fence
> + .global secondary_start_common
> +secondary_start_common:
>
> #ifdef CONFIG_MMU
> /* Enable virtual memory and relocate to virtual address */
> la a0, swapper_pg_dir
> call relocate
> #endif
> -
> tail smp_callin
> -#endif
> +#endif /* CONFIG_SMP */
>
> -END(_start)
> +.Lsecondary_park:
> + /* We lack SMP support or have too many harts, so park this hart */
> + wfi
> + j .Lsecondary_park
>
> #ifdef CONFIG_RISCV_M_MODE
> ENTRY(reset_regs)
> @@ -292,13 +310,6 @@ ENTRY(reset_regs)
> END(reset_regs)
> #endif /* CONFIG_RISCV_M_MODE */
>
> -.section ".text", "ax",@progbits
> -.align 2
> -.Lsecondary_park:
> - /* We lack SMP support or have too many harts, so park this hart */
> - wfi
> - j .Lsecondary_park
> -
> __PAGE_ALIGNED_BSS
> /* Empty zero page */
> .balign PAGE_SIZE
> diff --git a/arch/riscv/kernel/vmlinux.lds.S b/arch/riscv/kernel/vmlinux.lds.S
> index 12f42f96d46e..c8a88326df9e 100644
> --- a/arch/riscv/kernel/vmlinux.lds.S
> +++ b/arch/riscv/kernel/vmlinux.lds.S
> @@ -10,6 +10,7 @@
> #include <asm/cache.h>
> #include <asm/thread_info.h>
>
> +#include <linux/sizes.h>
> OUTPUT_ARCH(riscv)
> ENTRY(_start)
>
> @@ -20,8 +21,14 @@ SECTIONS
> /* Beginning of code and text segment */
> . = LOAD_OFFSET;
> _start = .;
> - __init_begin = .;
> HEAD_TEXT_SECTION
> + .noinit.text :
> + {
> + *(.noinit.text)
> + }

Can we try using HEAD_TEXT_SECTION for the SMP booting related
functions instead of a new ".noinit.text" section?
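
For reference, HEAD_TEXT_SECTION in include/asm-generic/vmlinux.lds.h
already emits an output section collecting everything placed in
".head.text", roughly:

	#define HEAD_TEXT	KEEP(*(.head.text))
	#define HEAD_TEXT_SECTION				\
		.head.text : AT(ADDR(.head.text) - LOAD_OFFSET) { \
			HEAD_TEXT				\
		}

So if the SMP bring-up code is tagged with __HEAD in head.S (as
suggested above), no extra ".noinit.text" output section should be
needed here.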

> + . = ALIGN(SZ_4K);

Change this to be page aligned:
. = ALIGN(PAGE_SIZE);
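
With both changes the beginning of SECTIONS would then look something
like this (just a sketch; the <linux/sizes.h> include also becomes
unnecessary once SZ_4K is gone):

	/* Beginning of code and text segment */
	. = LOAD_OFFSET;
	_start = .;
	HEAD_TEXT_SECTION
	. = ALIGN(PAGE_SIZE);

	__init_begin = .;
	INIT_TEXT_SECTION(PAGE_SIZE)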

> +
> + __init_begin = .;
> INIT_TEXT_SECTION(PAGE_SIZE)
> INIT_DATA_SECTION(16)
> /* we have to discard exit text and such at runtime, not link time */
> --
> 2.24.0
>

Regards,
Anup