Re: [PATCH v2 2/9] rcu: Add support for consolidated-RCU reader checking
From: Paul E. McKenney
Date: Tue Jul 16 2019 - 14:30:34 EST
On Fri, Jul 12, 2019 at 01:00:17PM -0400, Joel Fernandes (Google) wrote:
> This patch adds support for checking RCU reader sections in list
> traversal macros. Optionally, if the list macro is called under SRCU or
> other lock/mutex protection, then appropriate lockdep expressions can be
> passed to make the checks pass.
>
> Existing list_for_each_entry_rcu() invocations don't need to pass the
> optional fourth argument (cond) unless they are under some non-RCU
> protection and need to make the lockdep check pass.
>
> Signed-off-by: Joel Fernandes (Google) <joel@xxxxxxxxxxxxxxxxx>
If you fold in the checks for extra parameters, I will take this
one and also 1/9.
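
For anyone skimming this thread, the optional argument would be used
roughly as follows (names here are made up for illustration, not taken
from the patch):

	/* Plain RCU reader: no change needed. */
	rcu_read_lock();
	list_for_each_entry_rcu(p, &head, list)
		do_something(p);
	rcu_read_unlock();

	/* Updater holding a mutex: pass a lockdep expression. */
	mutex_lock(&my_mutex);
	list_for_each_entry_rcu(p, &head, list,
				lockdep_is_held(&my_mutex))
		do_something(p);
	mutex_unlock(&my_mutex);
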
Thanx, Paul
> ---
> include/linux/rculist.h | 28 +++++++++++++++++++++++-----
> include/linux/rcupdate.h | 7 +++++++
> kernel/rcu/Kconfig.debug | 11 +++++++++++
> kernel/rcu/update.c | 14 ++++++++++++++
> 4 files changed, 55 insertions(+), 5 deletions(-)
>
> diff --git a/include/linux/rculist.h b/include/linux/rculist.h
> index e91ec9ddcd30..1048160625bb 100644
> --- a/include/linux/rculist.h
> +++ b/include/linux/rculist.h
> @@ -40,6 +40,20 @@ static inline void INIT_LIST_HEAD_RCU(struct list_head *list)
> */
> #define list_next_rcu(list) (*((struct list_head __rcu **)(&(list)->next)))
>
> +/*
> + * Check during list traversal that we are within an RCU reader
> + */
> +
> +#ifdef CONFIG_PROVE_RCU_LIST
> +#define __list_check_rcu(dummy, cond, ...) \
> + ({ \
> + RCU_LOCKDEP_WARN(!(cond) && !rcu_read_lock_any_held(), \
> + "RCU-list traversed in non-reader section!"); \
> + })
> +#else
> +#define __list_check_rcu(dummy, cond, ...) ({})
> +#endif
> +
> /*
> * Insert a new entry between two known consecutive entries.
> *
> @@ -343,14 +357,16 @@ static inline void list_splice_tail_init_rcu(struct list_head *list,
> * @pos: the type * to use as a loop cursor.
> * @head: the head for your list.
> * @member: the name of the list_head within the struct.
> + * @cond: optional lockdep expression if called from non-RCU protection.
> *
> * This list-traversal primitive may safely run concurrently with
> * the _rcu list-mutation primitives such as list_add_rcu()
> * as long as the traversal is guarded by rcu_read_lock().
> */
> -#define list_for_each_entry_rcu(pos, head, member) \
> - for (pos = list_entry_rcu((head)->next, typeof(*pos), member); \
> - &pos->member != (head); \
> +#define list_for_each_entry_rcu(pos, head, member, cond...) \
> + for (__list_check_rcu(dummy, ## cond, 0), \
> + pos = list_entry_rcu((head)->next, typeof(*pos), member); \
> + &pos->member != (head); \
> pos = list_entry_rcu(pos->member.next, typeof(*pos), member))
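
For anyone puzzling over the variadic handling: the trailing 0 acts as
a default, so when no extra argument is given the 0 lands in the cond
slot and the check reduces to rcu_read_lock_any_held() alone.  A rough
sketch of the two expansions (illustrative only):

	list_for_each_entry_rcu(p, &head, list)
		/* check becomes __list_check_rcu(dummy, 0) */
	list_for_each_entry_rcu(p, &head, list, lockdep_is_held(&my_lock))
		/* check becomes __list_check_rcu(dummy, lockdep_is_held(&my_lock), 0) */
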
>
> /**
> @@ -616,13 +632,15 @@ static inline void hlist_add_behind_rcu(struct hlist_node *n,
> * @pos: the type * to use as a loop cursor.
> * @head: the head for your list.
> * @member: the name of the hlist_node within the struct.
> + * @cond: optional lockdep expression if called from non-RCU protection.
> *
> * This list-traversal primitive may safely run concurrently with
> * the _rcu list-mutation primitives such as hlist_add_head_rcu()
> * as long as the traversal is guarded by rcu_read_lock().
> */
> -#define hlist_for_each_entry_rcu(pos, head, member) \
> - for (pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
> +#define hlist_for_each_entry_rcu(pos, head, member, cond...) \
> + for (__list_check_rcu(dummy, ## cond, 0), \
> + pos = hlist_entry_safe (rcu_dereference_raw(hlist_first_rcu(head)),\
> typeof(*(pos)), member); \
> pos; \
> pos = hlist_entry_safe(rcu_dereference_raw(hlist_next_rcu(\
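
Same pattern for the hlist flavor; an SRCU-protected caller would
presumably pass something like this (again, illustrative names only):

	idx = srcu_read_lock(&my_srcu);
	hlist_for_each_entry_rcu(e, &table[bucket], node,
				 srcu_read_lock_held(&my_srcu))
		do_something(e);
	srcu_read_unlock(&my_srcu, idx);
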
> diff --git a/include/linux/rcupdate.h b/include/linux/rcupdate.h
> index 922bb6848813..712b464ab960 100644
> --- a/include/linux/rcupdate.h
> +++ b/include/linux/rcupdate.h
> @@ -223,6 +223,7 @@ int debug_lockdep_rcu_enabled(void);
> int rcu_read_lock_held(void);
> int rcu_read_lock_bh_held(void);
> int rcu_read_lock_sched_held(void);
> +int rcu_read_lock_any_held(void);
>
> #else /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
>
> @@ -243,6 +244,12 @@ static inline int rcu_read_lock_sched_held(void)
> {
> return !preemptible();
> }
> +
> +static inline int rcu_read_lock_any_held(void)
> +{
> + return !preemptible();
> +}
> +
> #endif /* #else #ifdef CONFIG_DEBUG_LOCK_ALLOC */
>
> #ifdef CONFIG_PROVE_RCU
> diff --git a/kernel/rcu/Kconfig.debug b/kernel/rcu/Kconfig.debug
> index 0ec7d1d33a14..b20d0e2903d1 100644
> --- a/kernel/rcu/Kconfig.debug
> +++ b/kernel/rcu/Kconfig.debug
> @@ -7,6 +7,17 @@ menu "RCU Debugging"
> config PROVE_RCU
> def_bool PROVE_LOCKING
>
> +config PROVE_RCU_LIST
> + bool "RCU list lockdep debugging"
> + depends on PROVE_RCU
> + default n
> + help
> + Enable RCU lockdep checking for list usages. By default it is
> + turned off since there are several list RCU users that still
> + need to be converted to pass a lockdep expression. Keeping it
> + disabled by default avoids false-positive splats; once all users
> + are converted, this config option can be removed.
> +
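
To actually exercise the new check, a debug .config would need
something like the following (PROVE_RCU comes along automatically via
PROVE_LOCKING, per the def_bool above):

	CONFIG_PROVE_LOCKING=y
	CONFIG_PROVE_RCU=y
	CONFIG_PROVE_RCU_LIST=y
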
> config TORTURE_TEST
> tristate
> default n
> diff --git a/kernel/rcu/update.c b/kernel/rcu/update.c
> index bb961cd89e76..0cc7be0fb6b5 100644
> --- a/kernel/rcu/update.c
> +++ b/kernel/rcu/update.c
> @@ -294,6 +294,20 @@ int rcu_read_lock_bh_held(void)
> }
> EXPORT_SYMBOL_GPL(rcu_read_lock_bh_held);
>
> +int rcu_read_lock_any_held(void)
> +{
> + if (!debug_lockdep_rcu_enabled())
> + return 1;
> + if (!rcu_is_watching())
> + return 0;
> + if (!rcu_lockdep_current_cpu_online())
> + return 0;
> + if (lock_is_held(&rcu_lock_map) || lock_is_held(&rcu_sched_lock_map))
> + return 1;
> + return !preemptible();
> +}
> +EXPORT_SYMBOL_GPL(rcu_read_lock_any_held);
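
In case it helps review: under lockdep this is essentially the
disjunction of the two existing flavor checks, something like the
following (a loose sketch of the logic, not a drop-in replacement):

	/* Roughly: */
	return rcu_read_lock_held() || rcu_read_lock_sched_held();

That is, either rcu_lock_map or rcu_sched_lock_map is held, or we are
simply running non-preemptibly.
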
> +
> #endif /* #ifdef CONFIG_DEBUG_LOCK_ALLOC */
>
> /**
> --
> 2.22.0.510.g264f2c817a-goog
>