Re: [PATCH 3/3] riscv: optimized memset
From: Jisheng Zhang
Date: Tue Jan 30 2024 - 08:38:53 EST
On Tue, Jan 30, 2024 at 02:07:37PM +0200, Nick Kossifidis wrote:
> On 1/28/24 13:10, Jisheng Zhang wrote:
> > diff --git a/arch/riscv/lib/string.c b/arch/riscv/lib/string.c
> > index 20677c8067da..022edda68f1c 100644
> > --- a/arch/riscv/lib/string.c
> > +++ b/arch/riscv/lib/string.c
> > @@ -144,3 +144,44 @@ void *memmove(void *dest, const void *src, size_t count) __weak __alias(__memmov
> > EXPORT_SYMBOL(memmove);
> > void *__pi_memmove(void *dest, const void *src, size_t count) __alias(__memmove);
> > void *__pi___memmove(void *dest, const void *src, size_t count) __alias(__memmove);
> > +
> > +void *__memset(void *s, int c, size_t count)
> > +{
> > +	union types dest = { .as_u8 = s };
> > +
> > +	if (count >= MIN_THRESHOLD) {
> > +		unsigned long cu = (unsigned long)c;
> > +
> > +		/* Compose an ulong with 'c' repeated 4/8 times */
> > +#ifdef CONFIG_ARCH_HAS_FAST_MULTIPLIER
> > +		cu *= 0x0101010101010101UL;
Here we need to check BITS_PER_LONG and use 0x01010101UL for rv32, the 64-bit
constant doesn't fit in a 32-bit unsigned long.
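
Something like this should do, I think (untested sketch):

#if BITS_PER_LONG == 64
		cu *= 0x0101010101010101UL;
#else
		cu *= 0x01010101UL;
#endif
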
> > +#else
> > +		cu |= cu << 8;
> > +		cu |= cu << 16;
> > +		/* Suppress warning on 32 bit machines */
> > +		cu |= (cu << 16) << 16;
> > +#endif
>
> I guess you could check against __SIZEOF_LONG__ here.
Hmm, I believe we can drop the OR-and-shift fallback entirely and always rely
on ARCH_HAS_FAST_MULTIPLIER, see
https://lore.kernel.org/linux-riscv/20240125145703.913-1-jszhang@xxxxxxxxxx/
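
With the |/<< fallback gone, the repeated-byte constant can also be built
width-agnostically, so no BITS_PER_LONG check is needed at all, e.g. (rough
sketch, untested):

		/* 0x0101...01 for the native long width */
		cu = (unsigned char)c * (~0UL / 0xff);

IIRC the kernel already has a REPEAT_BYTE() helper that expands to the same
expression.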
>
> > +		if (!IS_ENABLED(CONFIG_HAVE_EFFICIENT_UNALIGNED_ACCESS)) {
> > +			/*
> > +			 * Fill the buffer one byte at a time until
> > +			 * the destination is word aligned.
> > +			 */
> > +			for (; count && dest.as_uptr & WORD_MASK; count--)
> > +				*dest.as_u8++ = c;
> > +		}
> > +
> > +		/* Copy using the largest size allowed */
> > +		for (; count >= BYTES_LONG; count -= BYTES_LONG)
> > +			*dest.as_ulong++ = cu;
> > +	}
> > +
> > +	/* copy the remainder */
> > +	while (count--)
> > +		*dest.as_u8++ = c;
> > +
> > +	return s;
> > +}
> > +EXPORT_SYMBOL(__memset);
>
> BTW a similar approach could be used for memchr, e.g.:
>
> #if __SIZEOF_LONG__ == 8
> #define HAS_ZERO(_x) (((_x) - 0x0101010101010101ULL) & ~(_x) & \
> 		      0x8080808080808080ULL)
> #else
> #define HAS_ZERO(_x) (((_x) - 0x01010101UL) & ~(_x) & 0x80808080UL)
> #endif
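
For anyone reading along, HAS_ZERO() is the usual "word contains a zero byte"
test: if no byte of _x is zero the subtraction never borrows, so the only
bytes whose result has the top bit set are the ones that already had it set,
and the ~(_x) term masks those off; a zero byte, on the other hand, underflows
to 0xff and survives both masks. Quick rv32 example:

	_x                = 0x12003456	/* zero byte in the middle */
	_x - 0x01010101   = 0x10ff3355
	... & ~_x         = 0x00ff0301
	... & 0x80808080  = 0x00800000	/* non-zero => zero byte present */
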
>
> void *
> memchr(const void *src_ptr, int c, size_t len)
> {
> 	union const_data src = { .as_bytes = src_ptr };
> 	unsigned char byte = (unsigned char) c;
> 	unsigned long mask = (unsigned long) byte;
> 	size_t remaining = len;
>
> 	/* Nothing to do */
> 	if (!src_ptr || !len)
> 		return NULL;
>
> 	if (len < 2 * WORD_SIZE)
> 		goto trailing;
>
> 	mask |= mask << 8;
> 	mask |= mask << 16;
> #if __SIZEOF_LONG__ == 8
> 	mask |= mask << 32;
> #endif
>
> 	/* Search by byte up to the src's alignment boundary */
> 	for (; src.as_uptr & WORD_MASK; remaining--, src.as_bytes++) {
> 		if (*src.as_bytes == byte)
> 			return (void *) src.as_bytes;
> 	}
>
> 	/* Search word by word using the mask */
> 	for (; remaining >= WORD_SIZE; remaining -= WORD_SIZE, src.as_ulong++) {
> 		unsigned long check = *src.as_ulong ^ mask;
> 		if (HAS_ZERO(check))
> 			break;
> 	}
>
> trailing:
> 	for (; remaining > 0; remaining--, src.as_bytes++) {
> 		if (*src.as_bytes == byte)
> 			return (void *) src.as_bytes;
> 	}
>
> 	return NULL;
> }
>
> Regards,
> Nick