[PATCH] x86/crc32: optimize tail handling for crc32c short inputs
From: Eric Biggers <ebiggers@xxxxxxxxxx>
Date: Tue Mar 04 2025 - 16:33:22 EST
For handling the 0 <= len < sizeof(unsigned long) bytes left at the end,
do a 4-2-1 step-down instead of a byte-at-a-time loop. This allows
taking advantage of wider CRC instructions. Note that crc32c-3way.S
already uses this same optimization.
crc_kunit shows an improvement of about 25% for len=127.
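As a concrete illustration (assuming a 64-bit kernel, where
sizeof(unsigned long) == 8): for len=127 the main loop consumes fifteen
8-byte chunks (120 bytes), leaving a 7-byte tail. The old loop issued
seven crc32b instructions for that tail; the step-down issues just one
crc32l, one crc32w, and one crc32b.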
Suggested-by: H. Peter Anvin <hpa@xxxxxxxxx>
Signed-off-by: Eric Biggers <ebiggers@xxxxxxxxxx>
---
This applies to
https://web.git.kernel.org/pub/scm/linux/kernel/git/ebiggers/linux.git/log/?h=crc-next
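
(Not part of the patch: a minimal userspace sketch of the same 4-2-1
step-down, using the SSE4.2 intrinsics from <nmmintrin.h> instead of the
kernel's CRC32_INST asm. The function name crc32c_sse42() and the test
buffer are made up for illustration; build with gcc -O2 -msse4.2.)

#include <nmmintrin.h>
#include <stdint.h>
#include <stdio.h>
#include <string.h>

static uint32_t crc32c_sse42(uint32_t crc, const uint8_t *p, size_t len)
{
	/* Main loop: 8 bytes per CRC instruction (assumes a 64-bit build). */
	for (; len >= 8; len -= 8, p += 8) {
		uint64_t v;

		memcpy(&v, p, 8);	/* unaligned-safe load */
		crc = (uint32_t)_mm_crc32_u64(crc, v);
	}
	/* 4-2-1 step-down: at most one CRC instruction per branch. */
	if (len & 4) {
		uint32_t v;

		memcpy(&v, p, 4);
		crc = _mm_crc32_u32(crc, v);
		p += 4;
	}
	if (len & 2) {
		uint16_t v;

		memcpy(&v, p, 2);
		crc = _mm_crc32_u16(crc, v);
		p += 2;
	}
	if (len & 1)
		crc = _mm_crc32_u8(crc, *p);
	return crc;
}

int main(void)
{
	uint8_t buf[127];

	for (size_t i = 0; i < sizeof(buf); i++)
		buf[i] = (uint8_t)i;
	/* CRC32C convention: invert the CRC on input and output. */
	printf("%08x\n", ~crc32c_sse42(~0u, buf, sizeof(buf)));
	return 0;
}

On a 32-bit kernel the main loop already advances 4 bytes at a time, so
the tail is at most 3 bytes; the sizeof(unsigned long) > 4 guard in the
patch lets the compiler drop the crc32l branch entirely on such builds.
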
arch/x86/lib/crc32-glue.c | 10 +++++++++-
1 file changed, 9 insertions(+), 1 deletion(-)
diff --git a/arch/x86/lib/crc32-glue.c b/arch/x86/lib/crc32-glue.c
index 4b4721176799a..e3f93b17ac3f1 100644
--- a/arch/x86/lib/crc32-glue.c
+++ b/arch/x86/lib/crc32-glue.c
@@ -55,11 +55,19 @@ u32 crc32c_arch(u32 crc, const u8 *p, size_t len)
 	for (num_longs = len / sizeof(unsigned long);
 	     num_longs != 0; num_longs--, p += sizeof(unsigned long))
 		asm(CRC32_INST : "+r" (crc) : ASM_INPUT_RM (*(unsigned long *)p));
 
-	for (len %= sizeof(unsigned long); len; len--, p++)
+	if (sizeof(unsigned long) > 4 && (len & 4)) {
+		asm("crc32l %1, %0" : "+r" (crc) : ASM_INPUT_RM (*(u32 *)p));
+		p += 4;
+	}
+	if (len & 2) {
+		asm("crc32w %1, %0" : "+r" (crc) : ASM_INPUT_RM (*(u16 *)p));
+		p += 2;
+	}
+	if (len & 1)
 		asm("crc32b %1, %0" : "+r" (crc) : ASM_INPUT_RM (*p));
 
 	return crc;
 }
 EXPORT_SYMBOL(crc32c_arch);
 
base-commit: 13f3d13d88b5dcba104a204fcbee61c75f8407d0
--
2.48.1