Re: [PATCH v3 11/11] riscv: Add qspinlock support

From: Guo Ren
Date: Wed Jul 17 2024 - 05:30:38 EST


On Wed, Jul 17, 2024 at 2:31 PM Alexandre Ghiti <alexghiti@xxxxxxxxxxxx> wrote:
>
> In order to produce a generic kernel, a user can select
> CONFIG_RISCV_COMBO_SPINLOCKS, which will fall back at runtime to the
> ticket spinlock implementation if Zabha or Ziccrse is not present.
>
> Note that we can't use alternatives here because the discovery of
> extensions is done too late, and we need to start with the qspinlock
> implementation because the ticket spinlock implementation would pollute
> the spinlock value; so let's use static keys instead.
>
> This is largely based on Guo's work and Leonardo's reviews at [1].
>
> Link: https://lore.kernel.org/linux-riscv/20231225125847.2778638-1-guoren@xxxxxxxxxx/ [1]
> Signed-off-by: Guo Ren <guoren@xxxxxxxxxx>
> Signed-off-by: Alexandre Ghiti <alexghiti@xxxxxxxxxxxx>
> ---
> .../locking/queued-spinlocks/arch-support.txt |  2 +-
> arch/riscv/Kconfig                            | 29 ++++++++++++++
> arch/riscv/include/asm/Kbuild                 |  4 +-
> arch/riscv/include/asm/spinlock.h             | 39 +++++++++++++++++++
> arch/riscv/kernel/setup.c                     | 33 ++++++++++++++++
> include/asm-generic/qspinlock.h               |  2 +
> include/asm-generic/ticket_spinlock.h         |  2 +
> 7 files changed, 109 insertions(+), 2 deletions(-)
> create mode 100644 arch/riscv/include/asm/spinlock.h
>
> diff --git a/Documentation/features/locking/queued-spinlocks/arch-support.txt b/Documentation/features/locking/queued-spinlocks/arch-support.txt
> index 22f2990392ff..cf26042480e2 100644
> --- a/Documentation/features/locking/queued-spinlocks/arch-support.txt
> +++ b/Documentation/features/locking/queued-spinlocks/arch-support.txt
> @@ -20,7 +20,7 @@
>      |       openrisc: |  ok  |
>      |         parisc: | TODO |
>      |        powerpc: |  ok  |
> -    |          riscv: | TODO |
> +    |          riscv: |  ok  |
>      |           s390: | TODO |
>      |             sh: | TODO |
>      |          sparc: |  ok  |
> diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> index 0bbaec0444d0..5040c7eac70d 100644
> --- a/arch/riscv/Kconfig
> +++ b/arch/riscv/Kconfig
> @@ -72,6 +72,7 @@ config RISCV
>          select ARCH_WANT_OPTIMIZE_HUGETLB_VMEMMAP
>          select ARCH_WANTS_NO_INSTR
>          select ARCH_WANTS_THP_SWAP if HAVE_ARCH_TRANSPARENT_HUGEPAGE
> +        select ARCH_WEAK_RELEASE_ACQUIRE if ARCH_USE_QUEUED_SPINLOCKS
>          select BINFMT_FLAT_NO_DATA_START_OFFSET if !MMU
>          select BUILDTIME_TABLE_SORT if MMU
>          select CLINT_TIMER if RISCV_M_MODE
> @@ -482,6 +483,34 @@ config NODES_SHIFT
>            Specify the maximum number of NUMA Nodes available on the target
>            system. Increases memory reserved to accommodate various tables.
>
> +choice
> +        prompt "RISC-V spinlock type"
> +        default RISCV_COMBO_SPINLOCKS
> +
> +config RISCV_TICKET_SPINLOCKS
> +        bool "Using ticket spinlock"
> +
> +config RISCV_QUEUED_SPINLOCKS
> +        bool "Using queued spinlock"
> +        depends on SMP && MMU
> +        select ARCH_USE_QUEUED_SPINLOCKS
> +        help
> +          The queued spinlock implementation requires the forward progress
> +          guarantee of cmpxchg()/xchg() atomic operations: CAS with Zabha or
> +          LR/SC with Ziccrse provides such a guarantee.
> +
> +          Select this if and only if Zabha or Ziccrse is available on your
> +          platform.
> +
> +config RISCV_COMBO_SPINLOCKS
> +        bool "Using combo spinlock"
> +        depends on SMP && MMU
> +        select ARCH_USE_QUEUED_SPINLOCKS
> +        help
> +          Embed both the queued spinlock and the ticket spinlock so that the
> +          spinlock implementation can be chosen at runtime.
> +endchoice
> +
> config RISCV_ALTERNATIVE
> bool
> depends on !XIP_KERNEL
> diff --git a/arch/riscv/include/asm/Kbuild b/arch/riscv/include/asm/Kbuild
> index 504f8b7e72d4..ad72f2bd4cc9 100644
> --- a/arch/riscv/include/asm/Kbuild
> +++ b/arch/riscv/include/asm/Kbuild
> @@ -2,10 +2,12 @@
> generic-y += early_ioremap.h
> generic-y += flat.h
> generic-y += kvm_para.h
> +generic-y += mcs_spinlock.h
> generic-y += parport.h
> -generic-y += spinlock.h
> generic-y += spinlock_types.h
> +generic-y += ticket_spinlock.h
> generic-y += qrwlock.h
> generic-y += qrwlock_types.h
> +generic-y += qspinlock.h
> generic-y += user.h
> generic-y += vmlinux.lds.h
> diff --git a/arch/riscv/include/asm/spinlock.h b/arch/riscv/include/asm/spinlock.h
> new file mode 100644
> index 000000000000..4856d50006f2
> --- /dev/null
> +++ b/arch/riscv/include/asm/spinlock.h
> @@ -0,0 +1,39 @@
> +/* SPDX-License-Identifier: GPL-2.0 */
> +
> +#ifndef __ASM_RISCV_SPINLOCK_H
> +#define __ASM_RISCV_SPINLOCK_H
> +
> +#ifdef CONFIG_RISCV_COMBO_SPINLOCKS
> +#define _Q_PENDING_LOOPS (1 << 9)
> +
> +#define __no_arch_spinlock_redefine
> +#include <asm/ticket_spinlock.h>
> +#include <asm/qspinlock.h>
> +#include <asm/alternative.h>
> +
> +DECLARE_STATIC_KEY_TRUE(qspinlock_key);
> +
> +#define SPINLOCK_BASE_DECLARE(op, type, type_lock) \
> +static __always_inline type arch_spin_##op(type_lock lock) \
> +{ \
> +        if (static_branch_unlikely(&qspinlock_key)) \
> +                return queued_spin_##op(lock); \
> +        return ticket_spin_##op(lock); \
> +}
> +
> +SPINLOCK_BASE_DECLARE(lock, void, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(unlock, void, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(is_locked, int, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(is_contended, int, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(trylock, bool, arch_spinlock_t *)
> +SPINLOCK_BASE_DECLARE(value_unlocked, int, arch_spinlock_t)
> +
> +#else
> +
> +#include <asm/ticket_spinlock.h>
> +
> +#endif
> +
> +#include <asm/qrwlock.h>
> +
> +#endif /* __ASM_RISCV_SPINLOCK_H */
> diff --git a/arch/riscv/kernel/setup.c b/arch/riscv/kernel/setup.c
> index 4f73c0ae44b2..d7c31c9b8ead 100644
> --- a/arch/riscv/kernel/setup.c
> +++ b/arch/riscv/kernel/setup.c
> @@ -244,6 +244,38 @@ static void __init parse_dtb(void)
> #endif
> }
>
> +DEFINE_STATIC_KEY_TRUE(qspinlock_key);
> +EXPORT_SYMBOL(qspinlock_key);
> +
> +static void __init riscv_spinlock_init(void)
> +{
> +        char *using_ext;
> +
> +        if (IS_ENABLED(CONFIG_RISCV_ISA_ZACAS) &&
> +            IS_ENABLED(CONFIG_RISCV_ISA_ZABHA)) {
> +                using_ext = "using Zabha";
> +
> +                asm goto(ALTERNATIVE("j %[no_zacas]", "nop", 0, RISCV_ISA_EXT_ZACAS, 1)
> +                         : : : : no_zacas);
> +                asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0, RISCV_ISA_EXT_ZABHA, 1)
> +                         : : : : qspinlock);
> +        }
I'm okay with this patch.
I suggest adding a command-line parameter such as "enable_qspinlock"
that people could use on non-Zabha machines. I hope it can happen in
this series. That's all I need, thank you very much.
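Something like this rough, untested sketch is what I have in mind (the
parameter name and the flag are placeholders, not part of this patch):

/* Placeholder flag set from the early command line. */
static bool force_qspinlock __initdata;

static int __init enable_qspinlock_setup(char *p)
{
        force_qspinlock = true;
        return 0;
}
early_param("enable_qspinlock", enable_qspinlock_setup);

riscv_spinlock_init() could then test force_qspinlock before probing
the extensions and simply keep qspinlock_key enabled in that case.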

> +
> +no_zacas:
> +        using_ext = "using Ziccrse";
> +        asm goto(ALTERNATIVE("nop", "j %[qspinlock]", 0,
> +                             RISCV_ISA_EXT_ZICCRSE, 1)
> +                 : : : : qspinlock);
> +
> +        static_branch_disable(&qspinlock_key);
> +        pr_info("Ticket spinlock: enabled\n");
> +
> +        return;
> +
> +qspinlock:
> +        pr_info("Queued spinlock %s: enabled\n", using_ext);
> +}
> +
> extern void __init init_rt_signal_env(void);
>
> void __init setup_arch(char **cmdline_p)
> @@ -295,6 +327,7 @@ void __init setup_arch(char **cmdline_p)
>          riscv_set_dma_cache_alignment();
>
>          riscv_user_isa_enable();
> +        riscv_spinlock_init();
> }
>
> bool arch_cpu_is_hotpluggable(int cpu)
> diff --git a/include/asm-generic/qspinlock.h b/include/asm-generic/qspinlock.h
> index 0655aa5b57b2..bf47cca2c375 100644
> --- a/include/asm-generic/qspinlock.h
> +++ b/include/asm-generic/qspinlock.h
> @@ -136,6 +136,7 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
> }
> #endif
>
> +#ifndef __no_arch_spinlock_redefine
> /*
> * Remapping spinlock architecture specific functions to the corresponding
> * queued spinlock functions.
> @@ -146,5 +147,6 @@ static __always_inline bool virt_spin_lock(struct qspinlock *lock)
> #define arch_spin_lock(l) queued_spin_lock(l)
> #define arch_spin_trylock(l) queued_spin_trylock(l)
> #define arch_spin_unlock(l) queued_spin_unlock(l)
> +#endif
>
> #endif /* __ASM_GENERIC_QSPINLOCK_H */
> diff --git a/include/asm-generic/ticket_spinlock.h b/include/asm-generic/ticket_spinlock.h
> index cfcff22b37b3..325779970d8a 100644
> --- a/include/asm-generic/ticket_spinlock.h
> +++ b/include/asm-generic/ticket_spinlock.h
> @@ -89,6 +89,7 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
>          return (s16)((val >> 16) - (val & 0xffff)) > 1;
> }
>
> +#ifndef __no_arch_spinlock_redefine
> /*
> * Remapping spinlock architecture specific functions to the corresponding
> * ticket spinlock functions.
> @@ -99,5 +100,6 @@ static __always_inline int ticket_spin_is_contended(arch_spinlock_t *lock)
> #define arch_spin_lock(l) ticket_spin_lock(l)
> #define arch_spin_trylock(l) ticket_spin_trylock(l)
> #define arch_spin_unlock(l) ticket_spin_unlock(l)
> +#endif
>
> #endif /* __ASM_GENERIC_TICKET_SPINLOCK_H */
> --
> 2.39.2
>


--
Best Regards
Guo Ren