Re: [PATCH v4 4/7] RISC-V: Replace RISCV_MISALIGNED with RISCV_SCALAR_MISALIGNED

From: Charlie Jenkins
Date: Thu Jul 11 2024 - 18:39:32 EST


On Thu, Jul 11, 2024 at 05:58:43PM -0400, Jesse Taube wrote:
> Replace RISCV_MISALIGNED with RISCV_SCALAR_MISALIGNED to allow
> for the addition of RISCV_VECTOR_MISALIGNED in a later patch.
>
> Signed-off-by: Jesse Taube <jesse@xxxxxxxxxxxx>
> Reviewed-by: Conor Dooley <conor.dooley@xxxxxxxxxxxxx>
> ---
> V2 -> V3:
> - New patch
> V3 -> V4:
> - No changes
> ---
> arch/riscv/Kconfig | 6 +++---
> arch/riscv/include/asm/cpufeature.h | 2 +-
> arch/riscv/include/asm/entry-common.h | 2 +-
> arch/riscv/kernel/Makefile | 4 ++--
> arch/riscv/kernel/fpu.S | 4 ++--
> 5 files changed, 9 insertions(+), 9 deletions(-)
>
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index b94176e25be1..34d24242e37a 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -717,7 +717,7 @@ config THREAD_SIZE_ORDER
> Specify the Pages of thread stack size (from 4KB to 64KB), which also
> affects irq stack size, which is equal to thread stack size.
>
> -config RISCV_MISALIGNED
> +config RISCV_SCALAR_MISALIGNED
> bool
> select SYSCTL_ARCH_UNALIGN_ALLOW
> help
> @@ -734,7 +734,7 @@ choice
>
> config RISCV_PROBE_UNALIGNED_ACCESS
> bool "Probe for hardware unaligned access support"
> - select RISCV_MISALIGNED
> + select RISCV_SCALAR_MISALIGNED
> help
> During boot, the kernel will run a series of tests to determine the
> speed of unaligned accesses. This probing will dynamically determine
> @@ -745,7 +745,7 @@ config RISCV_PROBE_UNALIGNED_ACCESS
>
> config RISCV_EMULATED_UNALIGNED_ACCESS
> bool "Emulate unaligned access where system support is missing"
> - select RISCV_MISALIGNED
> + select RISCV_SCALAR_MISALIGNED
> help
> If unaligned memory accesses trap into the kernel as they are not
> supported by the system, the kernel will emulate the unaligned
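
Unrelated to the rename itself, but for anyone following along: the
result of this boot-time probing (or of the emulation path) is what
userspace ends up seeing through the hwprobe syscall. Roughly, going
from memory of the uapi header, so treat this as an untested sketch
rather than reference code:

/*
 * Untested sketch: ask hwprobe how misaligned scalar accesses
 * perform on this system. Names are from <asm/hwprobe.h>;
 * double-check them against your kernel headers.
 */
#include <asm/hwprobe.h>
#include <sys/syscall.h>
#include <unistd.h>
#include <stdio.h>

int main(void)
{
	struct riscv_hwprobe pair = {
		.key = RISCV_HWPROBE_KEY_CPUPERF_0,
	};

	/* one key/value pair, all CPUs (cpusetsize = 0), no flags */
	if (syscall(__NR_riscv_hwprobe, &pair, 1, 0, NULL, 0))
		return 1;

	if ((pair.value & RISCV_HWPROBE_MISALIGNED_MASK) ==
	    RISCV_HWPROBE_MISALIGNED_FAST)
		printf("misaligned accesses are fast\n");
	else
		printf("misaligned accesses are slow, emulated, or unknown\n");

	return 0;
}
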
> diff --git a/arch/riscv/include/asm/cpufeature.h b/arch/riscv/include/asm/cpufeature.h
> index 347805446151..0ed7d99c14dd 100644
> --- a/arch/riscv/include/asm/cpufeature.h
> +++ b/arch/riscv/include/asm/cpufeature.h
> @@ -33,8 +33,8 @@ extern struct riscv_isainfo hart_isa[NR_CPUS];
>
> void riscv_user_isa_enable(void);
>
> -#if defined(CONFIG_RISCV_MISALIGNED)
> bool check_unaligned_access_emulated_all_cpus(void);
> +#if defined(CONFIG_RISCV_SCALAR_MISALIGNED)
> void unaligned_emulation_finish(void);
> bool unaligned_ctl_available(void);
> DECLARE_PER_CPU(long, misaligned_access_speed);
> diff --git a/arch/riscv/include/asm/entry-common.h b/arch/riscv/include/asm/entry-common.h
> index 2293e535f865..0a4e3544c877 100644
> --- a/arch/riscv/include/asm/entry-common.h
> +++ b/arch/riscv/include/asm/entry-common.h
> @@ -25,7 +25,7 @@ static inline void arch_exit_to_user_mode_prepare(struct pt_regs *regs,
> void handle_page_fault(struct pt_regs *regs);
> void handle_break(struct pt_regs *regs);
>
> -#ifdef CONFIG_RISCV_MISALIGNED
> +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
> int handle_misaligned_load(struct pt_regs *regs);
> int handle_misaligned_store(struct pt_regs *regs);
> #else
> diff --git a/arch/riscv/kernel/Makefile b/arch/riscv/kernel/Makefile
> index 5b243d46f4b1..8d4e7d40e42f 100644
> --- a/arch/riscv/kernel/Makefile
> +++ b/arch/riscv/kernel/Makefile
> @@ -62,8 +62,8 @@ obj-y += probes/
> obj-y += tests/
> obj-$(CONFIG_MMU) += vdso.o vdso/
>
> -obj-$(CONFIG_RISCV_MISALIGNED) += traps_misaligned.o
> -obj-$(CONFIG_RISCV_MISALIGNED) += unaligned_access_speed.o
> +obj-$(CONFIG_RISCV_SCALAR_MISALIGNED) += traps_misaligned.o
> +obj-$(CONFIG_RISCV_SCALAR_MISALIGNED) += unaligned_access_speed.o
> obj-$(CONFIG_RISCV_PROBE_UNALIGNED_ACCESS) += copy-unaligned.o
>
> obj-$(CONFIG_FPU) += fpu.o
> diff --git a/arch/riscv/kernel/fpu.S b/arch/riscv/kernel/fpu.S
> index 327cf527dd7e..f74f6b60e347 100644
> --- a/arch/riscv/kernel/fpu.S
> +++ b/arch/riscv/kernel/fpu.S
> @@ -170,7 +170,7 @@ SYM_FUNC_END(__fstate_restore)
> __access_func(f31)
>
>
> -#ifdef CONFIG_RISCV_MISALIGNED
> +#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
>
> /*
> * Disable compressed instructions set to keep a constant offset between FP
> @@ -224,4 +224,4 @@ SYM_FUNC_START(get_f64_reg)
> fp_access_epilogue
> SYM_FUNC_END(get_f64_reg)
>
> -#endif /* CONFIG_RISCV_MISALIGNED */
> +#endif /* CONFIG_RISCV_SCALAR_MISALIGNED */
> --
> 2.45.2
>

Thank you for making this change!
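
For anyone skimming the thread: since this is a pure rename, callers
are untouched. The shape stays the usual "real declaration vs. static
inline stub" pattern, just under the new symbol, roughly like the
following (illustrative, not copied verbatim from the tree):

#ifdef CONFIG_RISCV_SCALAR_MISALIGNED
int handle_misaligned_load(struct pt_regs *regs);
int handle_misaligned_store(struct pt_regs *regs);
#else
/* Stubs for kernels built without scalar misaligned handling. */
static inline int handle_misaligned_load(struct pt_regs *regs)
{
	return -1;
}
static inline int handle_misaligned_store(struct pt_regs *regs)
{
	return -1;
}
#endif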

Reviewed-by: Charlie Jenkins <charlie@xxxxxxxxxxxx>