[PATCH] bitmap: replace division operations with shifts

From: Wang Qing
Date: Wed Apr 15 2020 - 03:33:06 EST


On some processors, the / operator compiles into a call to the
compiler's division library routine, which is inefficient. The bitmap
code is performance sensitive, so replace the division operations with
shifts.
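
A minimal standalone sketch (not part of the patch) showing why the
replacement is exact here: start and nbits are unsigned int in these
helpers, and for unsigned values division by a power of two is
equivalent to a right shift. For signed negative values the two are
not interchangeable, since / truncates toward zero and right-shifting
a negative value is implementation-defined in C.

	/* Sketch only: for unsigned operands, x / 8 == x >> 3 always holds. */
	#include <assert.h>

	int main(void)
	{
		for (unsigned int x = 0; x < 4096; x++)
			assert((x / 8) == (x >> 3));
		return 0;
	}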

Signed-off-by: Wang Qing <wangqing@xxxxxxxx>
---
include/linux/bitmap.h | 6 +++---
1 file changed, 3 insertions(+), 3 deletions(-)
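
For reviewers: one way to check what the compiler actually emits for
the two forms is a throwaway file like the one below (hypothetical,
not part of this patch). Build it with e.g. "gcc -O2 -S div_vs_shift.c"
and compare the assembly of the two functions on the target in
question.

	/* div_vs_shift.c - hypothetical comparison file, not part of this patch. */
	unsigned int bytes_from_bits_div(unsigned int nbits)
	{
		return nbits / 8;	/* division by a constant power of two */
	}

	unsigned int bytes_from_bits_shift(unsigned int nbits)
	{
		return nbits >> 3;	/* explicit right shift */
	}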

diff --git a/include/linux/bitmap.h b/include/linux/bitmap.h
index 99058eb..85ff982 100644
--- a/include/linux/bitmap.h
+++ b/include/linux/bitmap.h
@@ -337,7 +337,7 @@ static inline int bitmap_equal(const unsigned long *src1,
 		return !((*src1 ^ *src2) & BITMAP_LAST_WORD_MASK(nbits));
 	if (__builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
 	    IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
-		return !memcmp(src1, src2, nbits / 8);
+		return !memcmp(src1, src2, nbits >> 3);
 	return __bitmap_equal(src1, src2, nbits);
 }
 
@@ -411,7 +411,7 @@ static __always_inline void bitmap_set(unsigned long *map, unsigned int start,
 		 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
 		 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
 		 IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
-		memset((char *)map + start / 8, 0xff, nbits / 8);
+		memset((char *)map + (start >> 3), 0xff, nbits >> 3);
 	else
 		__bitmap_set(map, start, nbits);
 }
@@ -425,7 +425,7 @@ static __always_inline void bitmap_clear(unsigned long *map, unsigned int start,
 		 IS_ALIGNED(start, BITMAP_MEM_ALIGNMENT) &&
 		 __builtin_constant_p(nbits & BITMAP_MEM_MASK) &&
 		 IS_ALIGNED(nbits, BITMAP_MEM_ALIGNMENT))
-		memset((char *)map + start / 8, 0, nbits / 8);
+		memset((char *)map + (start >> 3), 0, nbits >> 3);
 	else
 		__bitmap_clear(map, start, nbits);
 }
--
2.7.4