Re: [PATCH v3 06/20] seqlock: Extend seqcount API with associated locks

From: Peter Zijlstra
Date: Wed Jul 08 2020 - 08:30:20 EST


On Wed, Jul 08, 2020 at 12:33:14PM +0200, Ahmed S. Darwish wrote:

> > +#define read_seqcount_begin(s) do_read_seqcount_begin(__to_seqcount_t(s))
> > +
> > +static inline unsigned do_read_seqcount_begin(const seqcount_t *s)
> > +{
> ...
>
> Hmm, the __to_seqcount_t(s) force cast is not good. It will break the
> arguments type-safety of seqcount macros that do not have either:
>
> __associated_lock_is_preemptible() or
> __assert_associated_lock_held()
>
> in their path. This basically includes all the read path macros, and
> even some others (e.g. write_seqcount_invalidate()).
>
> With the suggested force cast above, I can literally *pass anything* to
> read_seqcount_begin() and friends, and the compiler won't say a thing.
>
> So, I'll restore __to_seqcount_t(s) to its original implementation:

Right, I figured that the write side would be enough to catch glaring
abuse. But sure.

It's a bummer we didn't upgrade the minimum compiler version to 4.9,
that would've given us _Generic(), which allows one to write this
slightly less verbose I think.

How about something disgusting like this then?


/*
 * __SEQCOUNT_LOCKDEP(expr) - emit @expr only under CONFIG_LOCKDEP.
 *
 * Used below to conditionally declare the associated-lock member and
 * the lockdep_assert_held() checks, so !LOCKDEP builds carry no extra
 * storage or code.
 */
#ifdef CONFIG_LOCKDEP
#define __SEQCOUNT_LOCKDEP(expr) expr
#else
#define __SEQCOUNT_LOCKDEP(expr)
#endif

/*
 * Common return types for the three __seqprop_*() accessor families:
 * "ptr" yields the underlying seqcount_t, "preempt" whether the
 * associated lock is preemptible, "assert" performs a lockdep check.
 */
typedef seqcount_t * __seqprop_ptr_t;
typedef bool __seqprop_preempt_t;
typedef void __seqprop_assert_t;

/* Plain seqcount_t: the "ptr" property is the seqcount itself. */
static __always_inline __seqprop_ptr_t
__seqprop_seqcount_ptr(seqcount_t *s)
{
return s;
}

/*
 * Plain seqcount_t has no associated lock, so there is never a
 * preemptible-lock case to account for: always false.
 */
static __always_inline __seqprop_preempt_t
__seqprop_seqcount_preempt(seqcount_t *s)
{
return false;
}

/* No associated lock for plain seqcount_t: nothing to assert. */
static __always_inline __seqprop_assert_t
__seqprop_seqcount_assert(seqcount_t *s)
{
}

/*
 * SEQCOUNT_LOCKTYPE(name, locktype, preempt, lockmember) - instantiate a
 * seqcount_##name##_t type with an associated @locktype, plus its init
 * function and the three __seqprop_##name##_*() accessors consumed by
 * the __seqprop() dispatch below.
 *
 * @preempt:    constant expression; true when writers may be preempted
 *              while holding the associated lock.
 * @lockmember: member-access path from the stored lock pointer to the
 *              object lockdep_assert_held() expects.
 *
 * The lock pointer is only stored (and the held-assertion only emitted)
 * under CONFIG_LOCKDEP, via __SEQCOUNT_LOCKDEP().
 */
#define SEQCOUNT_LOCKTYPE(name, locktype, preempt, lockmember) \
typedef struct seqcount_##name { \
seqcount_t seqcount; \
__SEQCOUNT_LOCKDEP(locktype *lock); \
} seqcount_##name##_t; \
\
static __always_inline void \
seqcount_##name##_init(seqcount_##name##_t *s, locktype *l) \
{ \
seqcount_init(&s->seqcount); \
__SEQCOUNT_LOCKDEP(s->lock = l); \
} \
\
static __always_inline __seqprop_ptr_t \
__seqprop_##name##_ptr(seqcount_##name##_t *s) \
{ \
return &s->seqcount; \
} \
\
static __always_inline __seqprop_preempt_t \
__seqprop_##name##_preempt(seqcount_##name##_t *s) \
{ \
return preempt; \
} \
\
static __always_inline __seqprop_assert_t \
__seqprop_##name##_assert(seqcount_##name##_t *s) \
{ \
__SEQCOUNT_LOCKDEP(lockdep_assert_held(s->lockmember)); \
}


/*
 * SEQCNT_LOCKTYPE_ZERO(_name, _lock) - static initializer for any
 * seqcount_LOCKTYPE_t; the associated-lock member only exists (and is
 * only initialized) under CONFIG_LOCKDEP.
 */
#define SEQCNT_LOCKTYPE_ZERO(_name, _lock) { \
.seqcount = SEQCNT_ZERO(_name.seqcount), \
__SEQCOUNT_LOCKDEP(.lock = (_lock)) \
}

#include <linux/spinlock.h>
#include <linux/ww_mutex.h>

/*
 * On PREEMPT_RT, spinlocks and rwlocks become sleeping locks, so their
 * seqcount writers count as preemptible there.
 */
#define __SEQ_RT IS_BUILTIN(CONFIG_PREEMPT_RT)

/*              name          locktype          preempt   lockmember */
SEQCOUNT_LOCKTYPE(raw_spinlock, raw_spinlock_t, false, lock)
SEQCOUNT_LOCKTYPE(spinlock, spinlock_t, __SEQ_RT, lock)
SEQCOUNT_LOCKTYPE(rwlock, rwlock_t, __SEQ_RT, lock)
SEQCOUNT_LOCKTYPE(mutex, struct mutex, true, lock)
/*
 * NOTE(review): lockmember "lock->base" assumes lockdep_assert_held()
 * accepts the ww_mutex's embedded base mutex — confirm the member path
 * against struct ww_mutex.
 */
SEQCOUNT_LOCKTYPE(ww_mutex, struct ww_mutex,true, lock->base)

/*
 * __seqprop(s, prop) - dispatch to __seqprop_<locktype>_<prop>() based
 * on the static type of @s (plain seqcount_t or any of the
 * seqcount_LOCKTYPE_t variants instantiated above).
 *
 * GCC < 4.9 and sparse (__CHECKER__) lack usable C11 _Generic()
 * support, so fall back to a __same_type()/__builtin_choose_expr()
 * cascade there. Both arms cast @s through (void *) because every
 * alternative is type-checked even though only one is selected.
 */
#if (defined(CONFIG_CC_IS_GCC) && CONFIG_GCC_VERSION < 40900) || defined(__CHECKER__)

#define __seqprop_pick(const_expr, s, locktype, prop, otherwise) \
__builtin_choose_expr(const_expr, \
__seqprop_##locktype##_##prop((void *)(s)), \
otherwise)

/* Deliberately never defined: a link error flags an invalid @s type. */
extern void __seqprop_invalid(void);

#define __seqprop(s, prop) \
__seqprop_pick(__same_type(*(s), seqcount_t), (s), seqcount, prop, \
__seqprop_pick(__same_type(*(s), seqcount_raw_spinlock_t), (s), raw_spinlock, prop, \
__seqprop_pick(__same_type(*(s), seqcount_spinlock_t), (s), spinlock, prop, \
__seqprop_pick(__same_type(*(s), seqcount_rwlock_t), (s), rwlock, prop, \
__seqprop_pick(__same_type(*(s), seqcount_mutex_t), (s), mutex, prop, \
__seqprop_pick(__same_type(*(s), seqcount_ww_mutex_t), (s), ww_mutex, prop, \
__seqprop_invalid()))))))

#else

/*
 * Parenthesize the macro argument before the cast — (void *)(s), not
 * (void *)s — for macro hygiene, matching the choose_expr arm above.
 */
#define __seqprop_case(s, locktype, prop) \
seqcount_##locktype##_t: __seqprop_##locktype##_##prop((void *)(s))

#define __seqprop(s, prop) \
_Generic(*(s), \
seqcount_t: __seqprop_seqcount_##prop((void *)(s)),\
__seqprop_case((s), raw_spinlock, prop), \
__seqprop_case((s), spinlock, prop), \
__seqprop_case((s), rwlock, prop), \
__seqprop_case((s), mutex, prop), \
__seqprop_case((s), ww_mutex, prop))

#endif

/*
 * Public property accessors: each expands through __seqprop() so the
 * proper per-locktype helper is selected from @s's static type, keeping
 * the seqcount API type-safe for both plain and lock-associated
 * seqcounts.
 */
#define __to_seqcount_t(s) __seqprop(s, ptr)
#define __associated_lock_is_preemptible(s) __seqprop(s, preempt)
#define __assert_associated_lock_held(s) __seqprop(s, assert)