[PATCH v1 3/5] seqlock: seqcount_t: Implement all read APIs as statement expressions

From: Ahmed S. Darwish
Date: Thu Aug 27 2020 - 21:07:09 EST


The sequence counter read APIs are implemented as CPP macros so that
they can take either a seqcount_t or any of the seqcount_LOCKTYPE_t
variants. Such macros then expand *directly* to internal C functions
that take only a plain seqcount_t.
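
For example, read_seqcount_begin() before this patch (as removed
further below): the macro strips the lock type via __seqcount_ptr() and
forwards a plain seqcount_t pointer, so the seqcount_LOCKTYPE_t is
already gone by the time the internal function runs:

	#define read_seqcount_begin(s)					\
		read_seqcount_t_begin(__seqcount_ptr(s))

	static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
	{
		seqcount_lockdep_reader_access(s);
		return raw_read_seqcount_t_begin(s);
	}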

Further commits need access to the seqcount_LOCKTYPE_t type inside the
actual read APIs' code. Thus transform all of the seqcount read APIs
into pure GCC statement expressions instead.
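
As a sketch of the resulting form (taken from the hunks below), the
same read_seqcount_begin() becomes a statement expression whose body
still sees the original, possibly seqcount_LOCKTYPE_t, pointer "s":

	#define read_seqcount_begin(s)					\
	({								\
		seqcount_lockdep_reader_access(__seqcount_ptr(s));	\
		raw_read_seqcount_begin(s);				\
	})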

This will not break type safety: all of the transformed APIs resolve to
a _Generic() selection that has no "default" case.
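
For reference, a sketch of the __seqprop() dispatcher already in
seqlock.h (only its ww_mutex tail is visible in the hunk context
below); with no "default" association, passing anything other than a
seqcount_t or a seqcount_LOCKTYPE_t pointer is a compile-time error:

	#define __seqprop(s, prop) _Generic(*(s),			\
		seqcount_t:	__seqcount_t_##prop((void *)(s)),	\
		__seqprop_case((s),	raw_spinlock,	prop),		\
		__seqprop_case((s),	spinlock,	prop),		\
		__seqprop_case((s),	rwlock,		prop),		\
		__seqprop_case((s),	mutex,		prop),		\
		__seqprop_case((s),	ww_mutex,	prop))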

This will also not affect the transformed APIs' readability: the
kernel-doc previously added above all seqlock.h functions makes the
expectations quite clear for call-site developers.

Signed-off-by: Ahmed S. Darwish <a.darwish@xxxxxxxxxxxxx>
---
include/linux/seqlock.h | 98 ++++++++++++++++++++---------------------
1 file changed, 49 insertions(+), 49 deletions(-)
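
For context, the reader-side pattern these APIs serve is unchanged at
call sites. A minimal sketch ("data_seq" and the protected data are
illustrative names, not from this patch):

	unsigned seq;

	do {
		seq = read_seqcount_begin(&data_seq);
		/* speculatively read the seqcount-protected data */
	} while (read_seqcount_retry(&data_seq, seq));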

diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5470e9cd52ce..d114a9f4e9d9 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -182,6 +182,12 @@ __seqcount_##locktype##_ptr(seqcount_##locktype##_t *s) \
return &s->seqcount; \
} \
\
+static __always_inline unsigned \
+__seqcount_##locktype##_sequence(const seqcount_##locktype##_t *s) \
+{ \
+ return READ_ONCE(s->seqcount.sequence); \
+} \
+ \
static __always_inline bool \
__seqcount_##locktype##_preemptible(const seqcount_##locktype##_t *s) \
{ \
@@ -203,6 +209,11 @@ static inline seqcount_t *__seqcount_t_ptr(seqcount_t *s)
return s;
}

+static inline unsigned __seqcount_t_sequence(const seqcount_t *s)
+{
+ return READ_ONCE(s->sequence);
+}
+
static inline bool __seqcount_t_preemptible(const seqcount_t *s)
{
return false;
@@ -248,6 +259,7 @@ SEQCOUNT_LOCKTYPE(ww_mutex, struct ww_mutex, true, &s->lock->base)
__seqprop_case((s), ww_mutex, prop))

#define __seqcount_ptr(s) __seqprop(s, ptr)
+#define __seqcount_sequence(s) __seqprop(s, sequence)
#define __seqcount_lock_preemptible(s) __seqprop(s, preemptible)
#define __seqcount_assert_lock_held(s) __seqprop(s, assert)

@@ -266,21 +278,19 @@ SEQCOUNT_LOCKTYPE(ww_mutex, struct ww_mutex, true, &s->lock->base)
* Return: count to be passed to read_seqcount_retry()
*/
#define __read_seqcount_begin(s) \
- __read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
-{
- unsigned ret;
-
-repeat:
- ret = READ_ONCE(s->sequence);
- if (unlikely(ret & 1)) {
- cpu_relax();
- goto repeat;
- }
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret;
-}
+({ \
+ unsigned seq; \
+ \
+ do { \
+ seq = __seqcount_sequence(s); \
+ if (likely(!(seq & 1))) \
+ break; \
+ cpu_relax(); \
+ } while (true); \
+ \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ seq; \
+})

/**
* raw_read_seqcount_begin() - begin a seqcount_t read section w/o lockdep
@@ -289,14 +299,12 @@ static inline unsigned __read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount_begin(s) \
- raw_read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
-{
- unsigned ret = __read_seqcount_t_begin(s);
- smp_rmb();
- return ret;
-}
+({ \
+ unsigned seq = __read_seqcount_begin(s); \
+ \
+ smp_rmb(); \
+ seq; \
+})

/**
* read_seqcount_begin() - begin a seqcount_t read critical section
@@ -305,13 +313,10 @@ static inline unsigned raw_read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define read_seqcount_begin(s) \
- read_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
-{
- seqcount_lockdep_reader_access(s);
- return raw_read_seqcount_t_begin(s);
-}
+({ \
+ seqcount_lockdep_reader_access(__seqcount_ptr(s)); \
+ raw_read_seqcount_begin(s); \
+})

/**
* raw_read_seqcount() - read the raw seqcount_t counter value
@@ -325,15 +330,13 @@ static inline unsigned read_seqcount_t_begin(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_read_seqcount(s) \
- raw_read_seqcount_t(__seqcount_ptr(s))
-
-static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
-{
- unsigned ret = READ_ONCE(s->sequence);
- smp_rmb();
- kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX);
- return ret;
-}
+({ \
+ unsigned seq = __seqcount_sequence(s); \
+ \
+ smp_rmb(); \
+ kcsan_atomic_next(KCSAN_SEQLOCK_REGION_MAX); \
+ seq; \
+})

/**
* raw_seqcount_begin() - begin a seqcount_t read critical section w/o
@@ -353,16 +356,13 @@ static inline unsigned raw_read_seqcount_t(const seqcount_t *s)
* Return: count to be passed to read_seqcount_retry()
*/
#define raw_seqcount_begin(s) \
- raw_seqcount_t_begin(__seqcount_ptr(s))
-
-static inline unsigned raw_seqcount_t_begin(const seqcount_t *s)
-{
- /*
- * If the counter is odd, let read_seqcount_retry() fail
- * by decrementing the counter.
- */
- return raw_read_seqcount_t(s) & ~1;
-}
+({ \
+ /* \
+ * If the counter is odd, let read_seqcount_retry() fail \
+ * by decrementing the counter. \
+ */ \
+ raw_read_seqcount(s) & ~1; \
+})

/**
* __read_seqcount_retry() - end a seqcount_t read section w/o barrier
--
2.28.0