[PATCH v1 02/48] tools headers: Silence -Wshorten-64-to-32 warnings
From: Ian Rogers
Date: Tue Apr 01 2025 - 14:25:09 EST
The clang warning -Wshorten-64-to-32 can be useful to catch
inadvertent truncation. In some instances the truncation can change
the sign of the result, for example, when a 64-bit difference is
truncated to an int so that it can be returned from a sort comparison
routine. Silence the warnings by making the implicit truncations
explicit.
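As an illustration (a hypothetical sketch, not code from this series;
struct event and cmp_event_time are made-up names), a sort comparator
like the following mis-sorts once the 64-bit difference is implicitly
truncated to an int:

	struct event { u64 time; };

	static int cmp_event_time(const void *a, const void *b)
	{
		u64 ta = ((const struct event *)a)->time;
		u64 tb = ((const struct event *)b)->time;

		/*
		 * Implicit 64->32 truncation: a difference of 1ULL << 32
		 * truncates to 0 ("equal"), and a difference of 1ULL << 31
		 * truncates to a negative int, flipping the comparison.
		 */
		return ta - tb;
	}

An explicit "return ta < tb ? -1 : ta > tb ? 1 : 0;" avoids both the
warning and the bug.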
Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
---
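(Note, not part of the commit message: the warnings can be reproduced
with a clang build that passes the flag through the usual knob, for
example something like

	make -C tools/perf CC=clang EXTRA_CFLAGS=-Wshorten-64-to-32

though the exact invocation depends on the tool being built.)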
tools/include/asm-generic/bitops/fls64.h | 2 +-
tools/include/linux/bitfield.h | 2 +-
tools/include/linux/bitmap.h | 2 +-
tools/include/linux/err.h | 2 +-
tools/include/linux/hash.h | 2 +-
tools/include/linux/math64.h | 2 +-
6 files changed, 6 insertions(+), 6 deletions(-)
diff --git a/tools/include/asm-generic/bitops/fls64.h b/tools/include/asm-generic/bitops/fls64.h
index 866f2b2304ff..9ad3ff12f454 100644
--- a/tools/include/asm-generic/bitops/fls64.h
+++ b/tools/include/asm-generic/bitops/fls64.h
@@ -21,7 +21,7 @@ static __always_inline int fls64(__u64 x)
__u32 h = x >> 32;
if (h)
return fls(h) + 32;
- return fls(x);
+ return fls((__u32)x);
}
#elif BITS_PER_LONG == 64
static __always_inline int fls64(__u64 x)
diff --git a/tools/include/linux/bitfield.h b/tools/include/linux/bitfield.h
index 6093fa6db260..aa0b8e52214f 100644
--- a/tools/include/linux/bitfield.h
+++ b/tools/include/linux/bitfield.h
@@ -146,7 +146,7 @@ static __always_inline __##type type##_encode_bits(base v, base field) \
{ \
if (__builtin_constant_p(v) && (v & ~field_mask(field))) \
__field_overflow(); \
- return to((v & field_mask(field)) * field_multiplier(field)); \
+ return to((__##type)((v & field_mask(field)) * field_multiplier(field))); \
} \
static __always_inline __##type type##_replace_bits(__##type old, \
base val, base field) \
diff --git a/tools/include/linux/bitmap.h b/tools/include/linux/bitmap.h
index 2a7f260ef9dc..b7a7c752e4f2 100644
--- a/tools/include/linux/bitmap.h
+++ b/tools/include/linux/bitmap.h
@@ -63,7 +63,7 @@ static inline bool bitmap_full(const unsigned long *src, unsigned int nbits)
return find_first_zero_bit(src, nbits) == nbits;
}
static inline unsigned int bitmap_weight(const unsigned long *src, unsigned int nbits)
{
if (small_const_nbits(nbits))
- return hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
+ return (unsigned int)hweight_long(*src & BITMAP_LAST_WORD_MASK(nbits));
diff --git a/tools/include/linux/err.h b/tools/include/linux/err.h
index 332b983ead1e..9c1746e3696d 100644
--- a/tools/include/linux/err.h
+++ b/tools/include/linux/err.h
@@ -55,7 +55,7 @@ static inline bool __must_check IS_ERR_OR_NULL(__force const void *ptr)
static inline int __must_check PTR_ERR_OR_ZERO(__force const void *ptr)
{
if (IS_ERR(ptr))
- return PTR_ERR(ptr);
+ return (int)PTR_ERR(ptr);
else
return 0;
}
diff --git a/tools/include/linux/hash.h b/tools/include/linux/hash.h
index 38edaa08f862..ecc8296cb397 100644
--- a/tools/include/linux/hash.h
+++ b/tools/include/linux/hash.h
@@ -75,7 +75,7 @@ static __always_inline u32 hash_64_generic(u64 val, unsigned int bits)
{
#if BITS_PER_LONG == 64
/* 64x64-bit multiply is efficient on all 64-bit processors */
- return val * GOLDEN_RATIO_64 >> (64 - bits);
+ return (u32)(val * GOLDEN_RATIO_64 >> (64 - bits));
#else
/* Hash 64 bits using only 32x32-bit multiply. */
return hash_32((u32)val ^ __hash_32(val >> 32), bits);
diff --git a/tools/include/linux/math64.h b/tools/include/linux/math64.h
index 4ad45d5943dc..03d6c5220957 100644
--- a/tools/include/linux/math64.h
+++ b/tools/include/linux/math64.h
@@ -48,7 +48,7 @@ static inline u64 mul_u64_u32_shr(u64 a, u32 b, unsigned int shift)
u32 ah, al;
u64 ret;
- al = a;
+ al = (u32)a;
ah = a >> 32;
ret = mul_u32_u32(al, b) >> shift;
--
2.49.0.504.g3bcea36a83-goog