[PATCH RFC 12/24] locking/seqlock: Support Clang's capability analysis

From: Marco Elver
Date: Thu Feb 06 2025 - 13:25:30 EST


Add support for Clang's capability analysis for seqlock_t: declare
seqlock_t as a capability struct, annotate the read-side and write-side
APIs with the corresponding shared/exclusive acquire and release
attributes, and add test cases to lib/test_capability-analysis.c.

Signed-off-by: Marco Elver <elver@xxxxxxxxxx>
---
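Note (illustration only, not part of the patch): with these annotations,
a hypothetical user of seqlock_t could look roughly like the sketch
below; the struct, field, and function names (stats, packets, stats_read,
stats_inc) are invented for the example.

#include <linux/seqlock.h>

struct stats {
	seqlock_t lock;
	u64 packets __var_guarded_by(&lock);	/* accesses are checked */
};

static u64 stats_read(struct stats *s)
{
	unsigned int seq;
	u64 val;

	do {
		seq = read_seqbegin(&s->lock);	/* acquires shared */
		val = s->packets;		/* ok: inside read section */
	} while (read_seqretry(&s->lock, seq));	/* releases shared */

	return val;
}

static void stats_inc(struct stats *s)
{
	write_seqlock(&s->lock);	/* acquires exclusive */
	s->packets++;			/* ok: inside write section */
	write_sequnlock(&s->lock);	/* releases exclusive */
}

Accessing s->packets outside such a read or write section would be
flagged by the analysis when it is enabled.
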
.../dev-tools/capability-analysis.rst | 2 +-
include/linux/seqlock.h | 24 +++++++++++
include/linux/seqlock_types.h | 5 ++-
lib/test_capability-analysis.c | 43 +++++++++++++++++++
4 files changed, 71 insertions(+), 3 deletions(-)

diff --git a/Documentation/dev-tools/capability-analysis.rst b/Documentation/dev-tools/capability-analysis.rst
index 31f76e877be5..8d9336e91ce2 100644
--- a/Documentation/dev-tools/capability-analysis.rst
+++ b/Documentation/dev-tools/capability-analysis.rst
@@ -85,7 +85,7 @@ Supported Kernel Primitives
~~~~~~~~~~~~~~~~~~~~~~~~~~~

Currently the following synchronization primitives are supported:
-`raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`.
+`raw_spinlock_t`, `spinlock_t`, `rwlock_t`, `mutex`, `seqlock_t`.

For capabilities with an initialization function (e.g., `spin_lock_init()`),
calling this function on the capability instance before initializing any
diff --git a/include/linux/seqlock.h b/include/linux/seqlock.h
index 5ce48eab7a2a..c914eb9714e9 100644
--- a/include/linux/seqlock.h
+++ b/include/linux/seqlock.h
@@ -816,6 +816,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
 	do { \
 		spin_lock_init(&(sl)->lock); \
 		seqcount_spinlock_init(&(sl)->seqcount, &(sl)->lock); \
+		__assert_cap(sl); \
 	} while (0)

/**
@@ -832,6 +833,7 @@ static __always_inline void write_seqcount_latch_end(seqcount_latch_t *s)
* Return: count, to be passed to read_seqretry()
*/
static inline unsigned read_seqbegin(const seqlock_t *sl)
+ __acquires_shared(sl) __no_capability_analysis
{
return read_seqcount_begin(&sl->seqcount);
}
@@ -848,6 +850,7 @@ static inline unsigned read_seqbegin(const seqlock_t *sl)
* Return: true if a read section retry is required, else false
*/
static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
+ __releases_shared(sl) __no_capability_analysis
{
return read_seqcount_retry(&sl->seqcount, start);
}
@@ -872,6 +875,7 @@ static inline unsigned read_seqretry(const seqlock_t *sl, unsigned start)
* _irqsave or _bh variants of this function instead.
*/
static inline void write_seqlock(seqlock_t *sl)
+ __acquires(sl) __no_capability_analysis
{
spin_lock(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -885,6 +889,7 @@ static inline void write_seqlock(seqlock_t *sl)
* critical section of given seqlock_t.
*/
static inline void write_sequnlock(seqlock_t *sl)
+ __releases(sl) __no_capability_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock(&sl->lock);
@@ -898,6 +903,7 @@ static inline void write_sequnlock(seqlock_t *sl)
* other write side sections, can be invoked from softirq contexts.
*/
static inline void write_seqlock_bh(seqlock_t *sl)
+ __acquires(sl) __no_capability_analysis
{
spin_lock_bh(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -912,6 +918,7 @@ static inline void write_seqlock_bh(seqlock_t *sl)
* write_seqlock_bh().
*/
static inline void write_sequnlock_bh(seqlock_t *sl)
+ __releases(sl) __no_capability_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_bh(&sl->lock);
@@ -925,6 +932,7 @@ static inline void write_sequnlock_bh(seqlock_t *sl)
* other write sections, can be invoked from hardirq contexts.
*/
static inline void write_seqlock_irq(seqlock_t *sl)
+ __acquires(sl) __no_capability_analysis
{
spin_lock_irq(&sl->lock);
do_write_seqcount_begin(&sl->seqcount.seqcount);
@@ -938,12 +946,14 @@ static inline void write_seqlock_irq(seqlock_t *sl)
* seqlock_t write side section opened with write_seqlock_irq().
*/
static inline void write_sequnlock_irq(seqlock_t *sl)
+ __releases(sl) __no_capability_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irq(&sl->lock);
}

static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
+ __acquires(sl) __no_capability_analysis
{
unsigned long flags;

@@ -976,6 +986,7 @@ static inline unsigned long __write_seqlock_irqsave(seqlock_t *sl)
*/
static inline void
write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
+ __releases(sl) __no_capability_analysis
{
do_write_seqcount_end(&sl->seqcount.seqcount);
spin_unlock_irqrestore(&sl->lock, flags);
@@ -998,6 +1009,7 @@ write_sequnlock_irqrestore(seqlock_t *sl, unsigned long flags)
* The opened read section must be closed with read_sequnlock_excl().
*/
static inline void read_seqlock_excl(seqlock_t *sl)
+ __acquires_shared(sl) __no_capability_analysis
{
spin_lock(&sl->lock);
}
@@ -1007,6 +1019,7 @@ static inline void read_seqlock_excl(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl(seqlock_t *sl)
+ __releases_shared(sl) __no_capability_analysis
{
spin_unlock(&sl->lock);
}
@@ -1021,6 +1034,7 @@ static inline void read_sequnlock_excl(seqlock_t *sl)
* from softirq contexts.
*/
static inline void read_seqlock_excl_bh(seqlock_t *sl)
+ __acquires_shared(sl) __no_capability_analysis
{
spin_lock_bh(&sl->lock);
}
@@ -1031,6 +1045,7 @@ static inline void read_seqlock_excl_bh(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_bh(seqlock_t *sl)
+ __releases_shared(sl) __no_capability_analysis
{
spin_unlock_bh(&sl->lock);
}
@@ -1045,6 +1060,7 @@ static inline void read_sequnlock_excl_bh(seqlock_t *sl)
* hardirq context.
*/
static inline void read_seqlock_excl_irq(seqlock_t *sl)
+ __acquires_shared(sl) __no_capability_analysis
{
spin_lock_irq(&sl->lock);
}
@@ -1055,11 +1071,13 @@ static inline void read_seqlock_excl_irq(seqlock_t *sl)
* @sl: Pointer to seqlock_t
*/
static inline void read_sequnlock_excl_irq(seqlock_t *sl)
+ __releases_shared(sl) __no_capability_analysis
{
spin_unlock_irq(&sl->lock);
}

static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
+ __acquires_shared(sl) __no_capability_analysis
{
unsigned long flags;

@@ -1089,6 +1107,7 @@ static inline unsigned long __read_seqlock_excl_irqsave(seqlock_t *sl)
*/
static inline void
read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
+ __releases_shared(sl) __no_capability_analysis
{
spin_unlock_irqrestore(&sl->lock, flags);
}
@@ -1125,6 +1144,7 @@ read_sequnlock_excl_irqrestore(seqlock_t *sl, unsigned long flags)
* parameter of the next read_seqbegin_or_lock() iteration.
*/
static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
+ __acquires_shared(lock) __no_capability_analysis
{
if (!(*seq & 1)) /* Even */
*seq = read_seqbegin(lock);
@@ -1140,6 +1160,7 @@ static inline void read_seqbegin_or_lock(seqlock_t *lock, int *seq)
* Return: true if a read section retry is required, false otherwise
*/
static inline int need_seqretry(seqlock_t *lock, int seq)
+ __releases_shared(lock) __no_capability_analysis
{
return !(seq & 1) && read_seqretry(lock, seq);
}
@@ -1153,6 +1174,7 @@ static inline int need_seqretry(seqlock_t *lock, int seq)
* with read_seqbegin_or_lock() and validated by need_seqretry().
*/
static inline void done_seqretry(seqlock_t *lock, int seq)
+ __no_capability_analysis
{
if (seq & 1)
read_sequnlock_excl(lock);
@@ -1180,6 +1202,7 @@ static inline void done_seqretry(seqlock_t *lock, int seq)
*/
static inline unsigned long
read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
+ __acquires_shared(lock) __no_capability_analysis
{
unsigned long flags = 0;

@@ -1205,6 +1228,7 @@ read_seqbegin_or_lock_irqsave(seqlock_t *lock, int *seq)
*/
static inline void
done_seqretry_irqrestore(seqlock_t *lock, int seq, unsigned long flags)
+ __no_capability_analysis
{
if (seq & 1)
read_sequnlock_excl_irqrestore(lock, flags);
diff --git a/include/linux/seqlock_types.h b/include/linux/seqlock_types.h
index dfdf43e3fa3d..9775d6f1a234 100644
--- a/include/linux/seqlock_types.h
+++ b/include/linux/seqlock_types.h
@@ -81,13 +81,14 @@ SEQCOUNT_LOCKNAME(mutex, struct mutex, true, mutex)
* - Comments on top of seqcount_t
* - Documentation/locking/seqlock.rst
*/
-typedef struct {
+struct_with_capability(seqlock) {
 	/*
 	 * Make sure that readers don't starve writers on PREEMPT_RT: use
 	 * seqcount_spinlock_t instead of seqcount_t. Check __SEQ_LOCK().
 	 */
 	seqcount_spinlock_t seqcount;
 	spinlock_t lock;
-} seqlock_t;
+};
+typedef struct seqlock seqlock_t;

#endif /* __LINUX_SEQLOCK_TYPES_H */
diff --git a/lib/test_capability-analysis.c b/lib/test_capability-analysis.c
index 3410c04c2b76..1e4b90f76420 100644
--- a/lib/test_capability-analysis.c
+++ b/lib/test_capability-analysis.c
@@ -6,6 +6,7 @@

#include <linux/build_bug.h>
#include <linux/mutex.h>
+#include <linux/seqlock.h>
#include <linux/spinlock.h>

/*
@@ -208,3 +209,45 @@ static void __used test_mutex_cond_guard(struct test_mutex_data *d)
d->counter++;
}
}
+
+struct test_seqlock_data {
+	seqlock_t sl;
+	int counter __var_guarded_by(&sl);
+};
+
+static void __used test_seqlock_init(struct test_seqlock_data *d)
+{
+	seqlock_init(&d->sl);
+	d->counter = 0;
+}
+
+static void __used test_seqlock_reader(struct test_seqlock_data *d)
+{
+	unsigned int seq;
+
+	do {
+		seq = read_seqbegin(&d->sl);
+		(void)d->counter;
+	} while (read_seqretry(&d->sl, seq));
+}
+
+static void __used test_seqlock_writer(struct test_seqlock_data *d)
+{
+	unsigned long flags;
+
+	write_seqlock(&d->sl);
+	d->counter++;
+	write_sequnlock(&d->sl);
+
+	write_seqlock_irq(&d->sl);
+	d->counter++;
+	write_sequnlock_irq(&d->sl);
+
+	write_seqlock_bh(&d->sl);
+	d->counter++;
+	write_sequnlock_bh(&d->sl);
+
+	write_seqlock_irqsave(&d->sl, flags);
+	d->counter++;
+	write_sequnlock_irqrestore(&d->sl, flags);
+}
--
2.48.1.502.g6dc24dfdaf-goog