[tip: locking/core] locking/seqlock: Propagate 'const' pointers within read-only methods, remove forced type casts

From: tip-bot2 for Ingo Molnar
Date: Sat Oct 14 2023 - 06:35:08 EST


The following commit has been merged into the locking/core branch of tip:

Commit-ID: 886ee55eabac0d46faf8bc0b22207ca2740847ba
Gitweb: https://git.kernel.org/tip/886ee55eabac0d46faf8bc0b22207ca2740847ba
Author: Ingo Molnar <mingo@xxxxxxxxxx>
AuthorDate: Fri, 13 Oct 2023 10:15:46 +02:00
Committer: Ingo Molnar <mingo@xxxxxxxxxx>
CommitterDate: Sat, 14 Oct 2023 11:06:15 +02:00

locking/seqlock: Propagate 'const' pointers within read-only methods, remove forced type casts

Currently __seqprop_ptr() is an inline function that must choose either a
'const' or a non-const seqcount-related pointer type - which results in the
undesirable loss of 'const' propagation via a forced type cast.
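
For illustration only - a minimal standalone sketch with invented names,
not kernel code - the shape of the problem is:

  struct ctr {
          unsigned int seq;
  };

  /* One shared accessor: the 'const' qualifier is silently cast away: */
  static inline unsigned int *ctr_ptr(const struct ctr *c)
  {
          return (void *)&c->seq;         /* drop const */
  }

A caller that only holds a 'const struct ctr *' gets back a writable
pointer, so the compiler can no longer catch accidental writes on
read-only paths.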

The easiest solution would be to turn the pointer wrappers into macros that
pass through whatever type is handed to them - but the clever maze of
seqlock API instantiation macros relies on the C preprocessor's '##'
token-pasting operator, which isn't recursive, so inline functions must be
used here.
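
Roughly - a simplified sketch; the real header builds the selection table
via __seqprop_case() and covers all of the seqcount_LOCKNAME_t types - the
dispatch looks like this:

  #define __seqprop(s, prop) _Generic(*(s),                             \
          seqcount_t:              __seqprop_##prop,                    \
          seqcount_raw_spinlock_t: __seqprop_raw_spinlock_##prop)

  #define seqprop_ptr(s)  __seqprop(s, ptr)(s)

The pasted __seqprop_*_ptr token has to name a real inline function that
_Generic() can select and that is then called with '(s)', so the wrappers
cannot be replaced by type-transparent pass-through macros.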

So create two wrapper variants instead: 'ptr' and 'const_ptr', and pick the
right one for the codepaths that are const: read_seqcount_begin() and
read_seqcount_retry().
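
At the call-site level this means read-side code can keep its pointers
'const' all the way down. A hypothetical reader - invented function and
data names, assuming a seqcount_spinlock_t protected value - might look
like this:

  /* Hypothetical read-side helper: the seqcount stays const throughout. */
  static unsigned int hypothetical_read(const seqcount_spinlock_t *sc,
                                        const unsigned int *protected_val)
  {
          unsigned int seq, val;

          do {
                  seq = read_seqcount_begin(sc);
                  val = READ_ONCE(*protected_val);
          } while (read_seqcount_retry(sc, seq));

          return val;
  }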

This cleans up type handling and allows the removal of all type forcing.

No change in functionality.

Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Reviewed-by: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Waiman Long <longman@xxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Paul E. McKenney <paulmck@xxxxxxxxxx>
---
include/linux/seqlock.h | 26 +++++++++++++++++++-------
1 file changed, 19 insertions(+), 7 deletions(-)

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 4b8dcd3..80f21d2 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -200,9 +200,15 @@ typedef struct seqcount_##lockname { \
} seqcount_##lockname##_t; \
\
static __always_inline seqcount_t * \
-__seqprop_##lockname##_ptr(const seqcount_##lockname##_t *s) \
+__seqprop_##lockname##_ptr(seqcount_##lockname##_t *s) \
{ \
- return (void *)&s->seqcount; /* drop const */ \
+ return &s->seqcount; \
+} \
+ \
+static __always_inline const seqcount_t * \
+__seqprop_##lockname##_const_ptr(const seqcount_##lockname##_t *s) \
+{ \
+ return &s->seqcount; \
} \
\
static __always_inline unsigned \
@@ -247,9 +253,14 @@ __seqprop_##lockname##_assert(const seqcount_##lockname##_t *s) \
* __seqprop() for seqcount_t
*/

-static inline seqcount_t *__seqprop_ptr(const seqcount_t *s)
+static inline seqcount_t *__seqprop_ptr(seqcount_t *s)
+{
+ return s;
+}
+
+static inline const seqcount_t *__seqprop_const_ptr(const seqcount_t *s)
{
- return (void *)s; /* drop const */
+ return s;
}

static inline unsigned __seqprop_sequence(const seqcount_t *s)
@@ -302,6 +313,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
__seqprop_case((s), mutex, prop))

#define seqprop_ptr(s) __seqprop(s, ptr)(s)
+#define seqprop_const_ptr(s) __seqprop(s, const_ptr)(s)
#define seqprop_sequence(s) __seqprop(s, sequence)(s)
#define seqprop_preemptible(s) __seqprop(s, preemptible)(s)
#define seqprop_assert(s) __seqprop(s, assert)(s)
@@ -353,7 +365,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
*/
#define read_seqcount_begin(s) \
({ \
- seqcount_lockdep_reader_access(seqprop_ptr(s)); \
+ seqcount_lockdep_reader_access(seqprop_const_ptr(s)); \
raw_read_seqcount_begin(s); \
})

@@ -419,7 +431,7 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
* Return: true if a read section retry is required, else false
*/
#define __read_seqcount_retry(s, start) \
- do___read_seqcount_retry(seqprop_ptr(s), start)
+ do___read_seqcount_retry(seqprop_const_ptr(s), start)

static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
{
@@ -439,7 +451,7 @@ static inline int do___read_seqcount_retry(const seqcount_t *s, unsigned start)
* Return: true if a read section retry is required, else false
*/
#define read_seqcount_retry(s, start) \
- do_read_seqcount_retry(seqprop_ptr(s), start)
+ do_read_seqcount_retry(seqprop_const_ptr(s), start)

static inline int do_read_seqcount_retry(const seqcount_t *s, unsigned start)
{