Re: [PATCH v10 3/6] riscv: mm: dma-noncoherent: nonstandard cache operations support
From: Lad, Prabhakar
Date: Mon Jul 31 2023 - 07:31:19 EST
Hi Jisheng,
Thank you for the review.
On Sun, Jul 30, 2023 at 4:09 PM Jisheng Zhang <jszhang@xxxxxxxxxx> wrote:
>
> On Sun, Jul 02, 2023 at 09:34:26PM +0100, Prabhakar wrote:
> > From: Lad Prabhakar <prabhakar.mahadev-lad.rj@xxxxxxxxxxxxxx>
> >
> > Introduce support for nonstandard noncoherent systems in the RISC-V
> > architecture. It enables the use of function pointers to handle cache
> > management in such systems.
> >
> > This patch adds a new configuration option called
> > "RISCV_NONSTANDARD_CACHE_OPS." This option is a boolean flag that
> > depends on "RISCV_DMA_NONCOHERENT" and enables the function pointer
> > support for cache management in nonstandard noncoherent systems.
> >
> > Signed-off-by: Lad Prabhakar <prabhakar.mahadev-lad.rj@xxxxxxxxxxxxxx>
> > Reviewed-by: Conor Dooley <conor.dooley@xxxxxxxxxxxxx>
> > Tested-by: Conor Dooley <conor.dooley@xxxxxxxxxxxxx> # tyre-kicking on a d1
> > ---
> > v9 -> v10
> > * Added __ro_after_init compiler attribute for noncoherent_cache_ops
> > * Renamed clean -> wback
> > * Renamed inval -> inv
> > * Renamed flush -> wback_inv
> >
> > v8 -> v9
> > * New patch
> > ---
> > arch/riscv/Kconfig | 7 ++++
> > arch/riscv/include/asm/dma-noncoherent.h | 28 +++++++++++++++
> > arch/riscv/mm/dma-noncoherent.c | 43 ++++++++++++++++++++++++
> > arch/riscv/mm/pmem.c | 13 +++++++
> > 4 files changed, 91 insertions(+)
> > create mode 100644 arch/riscv/include/asm/dma-noncoherent.h
> >
> > diff --git a/arch/riscv/Kconfig b/arch/riscv/Kconfig
> > index d9e451ac862a..42c86b13c5e1 100644
> > --- a/arch/riscv/Kconfig
> > +++ b/arch/riscv/Kconfig
> > @@ -265,6 +265,13 @@ config RISCV_DMA_NONCOHERENT
> > select ARCH_HAS_SYNC_DMA_FOR_DEVICE
> > select DMA_DIRECT_REMAP
> >
> > +config RISCV_NONSTANDARD_CACHE_OPS
> > + bool
> > + depends on RISCV_DMA_NONCOHERENT
> > + help
> > + This enables function pointer support for non-standard noncoherent
> > + systems to handle cache management.
>
> Per Documentation/riscv/patch-acceptance.rst:
>
> "we'll only consider patches for extensions that either:
>
> - Have been officially frozen or ratified by the RISC-V Foundation, or
> - Have been implemented in hardware that is widely available, per standard
> Linux practice."
>
> I'm not sure which item this patch series belongs to.
>
Maybe Conor can help me here ;)
> > +
> > config AS_HAS_INSN
> > def_bool $(as-instr,.insn r 51$(comma) 0$(comma) 0$(comma) t0$(comma) t0$(comma) zero)
> >
> > diff --git a/arch/riscv/include/asm/dma-noncoherent.h b/arch/riscv/include/asm/dma-noncoherent.h
> > new file mode 100644
> > index 000000000000..969cf1f1363a
> > --- /dev/null
> > +++ b/arch/riscv/include/asm/dma-noncoherent.h
> > @@ -0,0 +1,28 @@
> > +/* SPDX-License-Identifier: GPL-2.0-only */
> > +/*
> > + * Copyright (C) 2023 Renesas Electronics Corp.
> > + */
> > +
> > +#ifndef __ASM_DMA_NONCOHERENT_H
> > +#define __ASM_DMA_NONCOHERENT_H
> > +
> > +#include <linux/dma-direct.h>
> > +
> > +/*
> > + * struct riscv_cache_ops - Structure for CMO function pointers
>
> can we reword this line as
> "struct riscv_nonstd_cache_ops - Structure for non-standard CMO function
> pointers" to explicitly note this is only for non-standard CMO.
>
Sure, I will update it.
Cheers,
Prabhakar
> > + *
> > + * @wback: Function pointer for cache writeback
> > + * @inv: Function pointer for invalidating cache
> > + * @wback_inv: Function pointer for flushing the cache (writeback + invalidating)
> > + */
> > +struct riscv_cache_ops {
> > + void (*wback)(phys_addr_t paddr, unsigned long size);
> > + void (*inv)(phys_addr_t paddr, unsigned long size);
> > + void (*wback_inv)(phys_addr_t paddr, unsigned long size);
> > +};
> > +
> > +extern struct riscv_cache_ops noncoherent_cache_ops;
> > +
> > +void riscv_noncoherent_register_cache_ops(const struct riscv_cache_ops *ops);
> > +
> > +#endif /* __ASM_DMA_NONCOHERENT_H */
> > diff --git a/arch/riscv/mm/dma-noncoherent.c b/arch/riscv/mm/dma-noncoherent.c
> > index b9a9f57e02be..4c2e3f1cdfe6 100644
> > --- a/arch/riscv/mm/dma-noncoherent.c
> > +++ b/arch/riscv/mm/dma-noncoherent.c
> > @@ -9,13 +9,26 @@
> > #include <linux/dma-map-ops.h>
> > #include <linux/mm.h>
> > #include <asm/cacheflush.h>
> > +#include <asm/dma-noncoherent.h>
> >
> > static bool noncoherent_supported;
> >
> > +struct riscv_cache_ops noncoherent_cache_ops __ro_after_init = {
> > + .wback = NULL,
> > + .inv = NULL,
> > + .wback_inv = NULL,
> > +};
> > +
> > static inline void arch_dma_cache_wback(phys_addr_t paddr, size_t size)
> > {
> > void *vaddr = phys_to_virt(paddr);
> >
> > +#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
> > + if (unlikely(noncoherent_cache_ops.wback)) {
>
> I'm worried about the performance impact here.
> For the sake of a unified kernel Image, RISCV_NONSTANDARD_CACHE_OPS will
> be enabled by default, so the performance of standard CMO and T-HEAD CMO
> platforms will also be impacted: even with the unlikely() hint here, the
> check still has to be performed.
>
> > + noncoherent_cache_ops.wback(paddr, size);
> > + return;
> > + }
> > +#endif
> > ALT_CMO_OP(clean, vaddr, size, riscv_cbom_block_size);
> > }
> >
> > @@ -23,6 +36,13 @@ static inline void arch_dma_cache_inv(phys_addr_t paddr, size_t size)
> > {
> > void *vaddr = phys_to_virt(paddr);
> >
> > +#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
> > + if (unlikely(noncoherent_cache_ops.inv)) {
> > + noncoherent_cache_ops.inv(paddr, size);
> > + return;
> > + }
> > +#endif
> > +
> > ALT_CMO_OP(inval, vaddr, size, riscv_cbom_block_size);
> > }
> >
> > @@ -30,6 +50,13 @@ static inline void arch_dma_cache_wback_inv(phys_addr_t paddr, size_t size)
> > {
> > void *vaddr = phys_to_virt(paddr);
> >
> > +#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
> > + if (unlikely(noncoherent_cache_ops.wback_inv)) {
> > + noncoherent_cache_ops.wback_inv(paddr, size);
> > + return;
> > + }
> > +#endif
> > +
> > ALT_CMO_OP(flush, vaddr, size, riscv_cbom_block_size);
> > }
> >
> > @@ -50,6 +77,13 @@ void arch_dma_prep_coherent(struct page *page, size_t size)
> > {
> > void *flush_addr = page_address(page);
> >
> > +#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
> > + if (unlikely(noncoherent_cache_ops.wback_inv)) {
> > + noncoherent_cache_ops.wback_inv(page_to_phys(page), size);
> > + return;
> > + }
> > +#endif
> > +
> > ALT_CMO_OP(flush, flush_addr, size, riscv_cbom_block_size);
> > }
> >
> > @@ -75,3 +109,12 @@ void riscv_noncoherent_supported(void)
> > "Non-coherent DMA support enabled without a block size\n");
> > noncoherent_supported = true;
> > }
> > +
> > +void riscv_noncoherent_register_cache_ops(const struct riscv_cache_ops *ops)
> > +{
> > + if (!ops)
> > + return;
> > +
> > + noncoherent_cache_ops = *ops;
> > +}
> > +EXPORT_SYMBOL_GPL(riscv_noncoherent_register_cache_ops);
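
For reference, a non-standard platform would presumably hook into this
interface roughly as in the sketch below (hypothetical my_soc_* names, for
illustration only, not part of this series). Since noncoherent_cache_ops is
__ro_after_init, registration has to happen during boot, e.g. from an early
initcall:

#include <linux/init.h>
#include <asm/dma-noncoherent.h>

static void my_soc_cache_wback(phys_addr_t paddr, unsigned long size)
{
        /* SoC-specific writeback of [paddr, paddr + size) */
}

static void my_soc_cache_inv(phys_addr_t paddr, unsigned long size)
{
        /* SoC-specific invalidate of [paddr, paddr + size) */
}

static void my_soc_cache_wback_inv(phys_addr_t paddr, unsigned long size)
{
        /* SoC-specific writeback + invalidate of [paddr, paddr + size) */
}

static const struct riscv_cache_ops my_soc_cache_ops = {
        .wback          = my_soc_cache_wback,
        .inv            = my_soc_cache_inv,
        .wback_inv      = my_soc_cache_wback_inv,
};

static int __init my_soc_cache_init(void)
{
        riscv_noncoherent_register_cache_ops(&my_soc_cache_ops);
        return 0;
}
early_initcall(my_soc_cache_init);
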
> > diff --git a/arch/riscv/mm/pmem.c b/arch/riscv/mm/pmem.c
> > index 089df92ae876..c5fc5ec96f6d 100644
> > --- a/arch/riscv/mm/pmem.c
> > +++ b/arch/riscv/mm/pmem.c
> > @@ -7,15 +7,28 @@
> > #include <linux/libnvdimm.h>
> >
> > #include <asm/cacheflush.h>
> > +#include <asm/dma-noncoherent.h>
> >
> > void arch_wb_cache_pmem(void *addr, size_t size)
> > {
> > +#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
> > + if (unlikely(noncoherent_cache_ops.wback)) {
> > + noncoherent_cache_ops.wback(virt_to_phys(addr), size);
> > + return;
> > + }
> > +#endif
> > ALT_CMO_OP(clean, addr, size, riscv_cbom_block_size);
> > }
> > EXPORT_SYMBOL_GPL(arch_wb_cache_pmem);
> >
> > void arch_invalidate_pmem(void *addr, size_t size)
> > {
> > +#ifdef CONFIG_RISCV_NONSTANDARD_CACHE_OPS
> > + if (unlikely(noncoherent_cache_ops.inv)) {
> > + noncoherent_cache_ops.inv(virt_to_phys(addr), size);
> > + return;
> > + }
> > +#endif
> > ALT_CMO_OP(inval, addr, size, riscv_cbom_block_size);
> > }
> > EXPORT_SYMBOL_GPL(arch_invalidate_pmem);
> > --
> > 2.34.1
> >
> >