[PATCH v4] bitops: use common function parameter names
From: Randy Dunlap
Date: Sun May 03 2026 - 01:25:27 EST
Fix the function prototypes to use the common parameter name 'addr'
instead of 'p' (common to arch-specific implementations of these
functions).
This avoids the kernel-doc warnings:
Warning: include/asm-generic/bitops/lock.h:19 function parameter 'p'
not described in 'arch_test_and_set_bit_lock'
Warning: include/asm-generic/bitops/lock.h:41 function parameter 'p'
not described in 'arch_clear_bit_unlock'
Warning: include/asm-generic/bitops/lock.h:59 function parameter 'p'
not described in 'arch___clear_bit_unlock'
Fixes: 84c6591103db ("locking/atomics, asm-generic/bitops/lock.h: Rewrite using atomic_fetch_*()")
Signed-off-by: Randy Dunlap <rdunlap@xxxxxxxxxxxxx>
---
v2: rebase & resend
v3: change the function parameter names instead of the kernel-doc
comments (Yury) (Fixes: can be kept or dropped at maintainer discretion.)
v4: convert function usage of p to addr (thanks, Yury)
Cc: Yury Norov <yury.norov@xxxxxxxxx>
Cc: Rasmus Villemoes <linux@xxxxxxxxxxxxxxxxxx>
Cc: Arnd Bergmann <arnd@xxxxxxxx>
Cc: linux-arch@xxxxxxxxxxxxxxx
Note: Shouldn't this line in the MAINTAINERS file:
F: include/asm-generic/bitops
instead be the following (with a trailing slash, to match the directory)?
F: include/asm-generic/bitops/
include/asm-generic/bitops/lock.h | 22 +++++++++++-----------
1 file changed, 11 insertions(+), 11 deletions(-)
--- linux-next-20260429.orig/include/asm-generic/bitops/lock.h
+++ linux-next-20260429/include/asm-generic/bitops/lock.h
@@ -16,16 +16,16 @@
* It can be used to implement bit locks.
*/
static __always_inline int
-arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *p)
+arch_test_and_set_bit_lock(unsigned int nr, volatile unsigned long *addr)
{
long old;
unsigned long mask = BIT_MASK(nr);
- p += BIT_WORD(nr);
- if (READ_ONCE(*p) & mask)
+ addr += BIT_WORD(nr);
+ if (READ_ONCE(*addr) & mask)
return 1;
- old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)p);
+ old = raw_atomic_long_fetch_or_acquire(mask, (atomic_long_t *)addr);
return !!(old & mask);
}
@@ -38,10 +38,10 @@ arch_test_and_set_bit_lock(unsigned int
* This operation is atomic and provides release barrier semantics.
*/
static __always_inline void
-arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+arch_clear_bit_unlock(unsigned int nr, volatile unsigned long *addr)
{
- p += BIT_WORD(nr);
- raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)p);
+ addr += BIT_WORD(nr);
+ raw_atomic_long_fetch_andnot_release(BIT_MASK(nr), (atomic_long_t *)addr);
}
/**
@@ -56,14 +56,14 @@ arch_clear_bit_unlock(unsigned int nr, v
* See for example x86's implementation.
*/
static inline void
-arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *p)
+arch___clear_bit_unlock(unsigned int nr, volatile unsigned long *addr)
{
unsigned long old;
- p += BIT_WORD(nr);
- old = READ_ONCE(*p);
+ addr += BIT_WORD(nr);
+ old = READ_ONCE(*addr);
old &= ~BIT_MASK(nr);
- raw_atomic_long_set_release((atomic_long_t *)p, old);
+ raw_atomic_long_set_release((atomic_long_t *)addr, old);
}
#ifndef arch_xor_unlock_is_negative_byte