Re: [PATCH v1] x86/lib: Optimize 8x loop and memory clobbers in csum_partial.c
From: Eric Dumazet
Date: Fri Nov 26 2021 - 13:43:31 EST
On Fri, Nov 26, 2021 at 8:08 AM Eric Dumazet <edumazet@xxxxxxxxxx> wrote:
> diff --git a/arch/x86/include/asm/checksum_64.h b/arch/x86/include/asm/checksum_64.h
> index 407beebadaf45a748f91a36b78bd1d023449b132..af93fb53b480ab7102db71c32ab6ca9604c6b5fb 100644
> --- a/arch/x86/include/asm/checksum_64.h
> +++ b/arch/x86/include/asm/checksum_64.h
> --- a/arch/x86/include/asm/checksum_64.h
> +++ b/arch/x86/include/asm/checksum_64.h
> @@ -182,4 +182,26 @@ static inline __wsum csum_add(__wsum csum, __wsum addend)
>  				(__force unsigned)addend);
>  }
>
> +static inline __wsum ipv6_csum_partial(const void *buff, int len, __wsum sum)
> +{
> +	u64 temp64;
> +
> +	if (unlikely(len == 40))
This of course needs to be the opposite condition:

    if (unlikely(len != sizeof(struct ipv6hdr)))
> +		return csum_partial(buff, len, sum);
> +
> +	temp64 = (__force u64)sum;
> +	asm("addq 0*8(%[src]),%[res]\n\t"
> +	    "adcq 1*8(%[src]),%[res]\n\t"
> +	    "adcq 2*8(%[src]),%[res]\n\t"
> +	    "adcq 3*8(%[src]),%[res]\n\t"
> +	    "adcq 4*8(%[src]),%[res]\n\t"
> +	    "adcq $0,%[res]"
> +	    : [res] "+r" (temp64)
> +	    : [src] "r" (buff)
> +	    : "memory");
> +	return (__force __wsum)add32_with_carry(temp64 >> 32, temp64);
> +}
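For readers without the header at hand: the closing add32_with_carry(temp64 >> 32, temp64)
folds the 64-bit running sum back down to 32 bits with an end-around carry. A plain-C
sketch of that fold, for illustration only (the in-kernel helper does the equivalent
with addl/adcl asm):

	#include <linux/types.h>

	/*
	 * Illustration only: fold a 64-bit one's-complement running sum
	 * down to 32 bits. Adding the two 32-bit halves can carry out of
	 * bit 31; that carry must be added back in (end-around carry),
	 * which is what add32_with_carry(temp64 >> 32, temp64) achieves.
	 */
	static inline u32 fold_to_32_sketch(u64 sum64)
	{
		u64 t = (sum64 >> 32) + (u32)sum64;	/* at most a 33-bit value */

		return (u32)t + (u32)(t >> 32);		/* t >> 32 is 0 or 1 here */
	}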
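The unlikely() hint assumes the dominant caller passes exactly one 40-byte struct
ipv6hdr, so only odd sizes pay for the generic loop. A hypothetical call site, with
the function name and surrounding variables assumed purely for illustration:

	#include <linux/ipv6.h>

	/*
	 * Hypothetical usage: checksum the fixed 40-byte IPv6 header of a
	 * received skb, taking the five-quadword fast path above.
	 */
	static __wsum example_hdr_csum(const struct sk_buff *skb)
	{
		const struct ipv6hdr *ip6 = ipv6_hdr(skb);

		return ipv6_csum_partial(ip6, sizeof(*ip6), 0);
	}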