Re: [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug

From: Qian Cai
Date: Mon Jul 08 2019 - 13:32:38 EST


I saw that Ingo sent a pull request to Linus for 5.3 [1] which includes the
offending commit "locking/lockdep: Consolidate lock usage bit initialization"
but does not include this patch.

[1] https://lore.kernel.org/lkml/20190708093516.GA57558@xxxxxxxxx/
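
For reference, here is the shape of the problem, condensed from the patch
below (a sketch, not the exact kernel code). Before the fix, when
CONFIG_TRACE_IRQFLAGS or CONFIG_PROVE_LOCKING was not defined, all of
mark_usage() was stubbed out, so mark_lock(curr, hlock, LOCK_USED) was
never called and no lock ever got the LOCK_USED bit:

    /* old stub compiled when TRACE_IRQFLAGS/PROVE_LOCKING are off */
    static inline int
    mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
    {
            return 1;       /* LOCK_USED is never marked */
    }

The fix keeps a single mark_usage() for all configurations and compiles out
only the IRQ usage checks, so the unconditional LOCK_USED marking is always
reached:

    static int
    mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
    {
    #if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
            /* ... hardirq/softirq usage checks elided ... */
    #endif
            /* mark it as used: */
            if (!mark_lock(curr, hlock, LOCK_USED))
                    return 0;

            return 1;
    }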

On Mon, 2019-06-10 at 13:52 +0800, Yuyang Du wrote:
> The commit:
>
>   091806515124b20 ("locking/lockdep: Consolidate lock usage bit initialization")
>
> misses marking the LOCK_USED flag at IRQ usage initialization when
> CONFIG_TRACE_IRQFLAGS or CONFIG_PROVE_LOCKING is not defined. Fix it.
>
> Reported-by: Qian Cai <cai@xxxxxx>
> Signed-off-by: Yuyang Du <duyuyang@xxxxxxxxx>
> ---
>  kernel/locking/lockdep.c | 110 +++++++++++++++++++++++------------------------
>  1 file changed, 53 insertions(+), 57 deletions(-)
>
> diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> index 48a840a..c3db987 100644
> --- a/kernel/locking/lockdep.c
> +++ b/kernel/locking/lockdep.c
> @@ -3460,9 +3460,61 @@ void trace_softirqs_off(unsigned long ip)
>          debug_atomic_inc(redundant_softirqs_off);
>  }
>  
> +static inline unsigned int task_irq_context(struct task_struct *task)
> +{
> +        return 2 * !!task->hardirq_context + !!task->softirq_context;
> +}
> +
> +static int separate_irq_context(struct task_struct *curr,
> +                                struct held_lock *hlock)
> +{
> +        unsigned int depth = curr->lockdep_depth;
> +
> +        /*
> +         * Keep track of points where we cross into an interrupt context:
> +         */
> +        if (depth) {
> +                struct held_lock *prev_hlock;
> +
> +                prev_hlock = curr->held_locks + depth-1;
> +                /*
> +                 * If we cross into another context, reset the
> +                 * hash key (this also prevents the checking and the
> +                 * adding of the dependency to 'prev'):
> +                 */
> +                if (prev_hlock->irq_context != hlock->irq_context)
> +                        return 1;
> +        }
> +        return 0;
> +}
> +
> +#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> +
> +static inline
> +int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
> +                  enum lock_usage_bit new_bit)
> +{
> +        WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
> +        return 1;
> +}
> +
> +static inline unsigned int task_irq_context(struct task_struct *task)
> +{
> +        return 0;
> +}
> +
> +static inline int separate_irq_context(struct task_struct *curr,
> +                                       struct held_lock *hlock)
> +{
> +        return 0;
> +}
> +
> +#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> +
>  static int
>  mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
>  {
> +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
>          if (!check)
>                  goto lock_used;
>  
> @@ -3510,6 +3562,7 @@ void trace_softirqs_off(unsigned long ip)
>          }
>  
>  lock_used:
> +#endif
>          /* mark it as used: */
>          if (!mark_lock(curr, hlock, LOCK_USED))
>                  return 0;
> @@ -3517,63 +3570,6 @@ void trace_softirqs_off(unsigned long ip)
>          return 1;
>  }
>  
> -static inline unsigned int task_irq_context(struct task_struct *task)
> -{
> -        return 2 * !!task->hardirq_context + !!task->softirq_context;
> -}
> -
> -static int separate_irq_context(struct task_struct *curr,
> -                                struct held_lock *hlock)
> -{
> -        unsigned int depth = curr->lockdep_depth;
> -
> -        /*
> -         * Keep track of points where we cross into an interrupt context:
> -         */
> -        if (depth) {
> -                struct held_lock *prev_hlock;
> -
> -                prev_hlock = curr->held_locks + depth-1;
> -                /*
> -                 * If we cross into another context, reset the
> -                 * hash key (this also prevents the checking and the
> -                 * adding of the dependency to 'prev'):
> -                 */
> -                if (prev_hlock->irq_context != hlock->irq_context)
> -                        return 1;
> -        }
> -        return 0;
> -}
> -
> -#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> -
> -static inline
> -int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
> -                  enum lock_usage_bit new_bit)
> -{
> -        WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
> -        return 1;
> -}
> -
> -static inline int
> -mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
> -{
> -        return 1;
> -}
> -
> -static inline unsigned int task_irq_context(struct task_struct *task)
> -{
> -        return 0;
> -}
> -
> -static inline int separate_irq_context(struct task_struct *curr,
> -                                       struct held_lock *hlock)
> -{
> -        return 0;
> -}
> -
> -#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> -
>  /*
>   * Mark a lock with a usage bit, and validate the state transition:
>   */