Re: [PATCH] locking/lockdep: Fix lock IRQ usage initialization bug

From: Yuyang Du
Date: Mon Jul 08 2019 - 21:21:48 EST


The problem should have been fixed by this commit in that pull:

locking/lockdep: Move mark_lock() inside CONFIG_TRACE_IRQFLAGS && CONFIG_PROVE_LOCKING

and this is a better fix than mine.
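
Either way, the point is that the LOCK_USED marking must still happen when
CONFIG_TRACE_IRQFLAGS or CONFIG_PROVE_LOCKING is not defined. A minimal sketch
of that structure, simplified from my patch quoted below rather than the exact
upstream code:

static int
mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
{
#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
        if (check) {
                /* hardirq/softirq safe/unsafe usage checks go here ... */
        }
#endif
        /* Always mark the class as used, even without IRQ state tracking: */
        if (!mark_lock(curr, hlock, LOCK_USED))
                return 0;

        return 1;
}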

Thanks,
Yuyang

On Tue, 9 Jul 2019 at 01:32, Qian Cai <cai@xxxxxx> wrote:
>
> I saw Ingo send a pull request to Linus for 5.3 [1] that includes the offending
> commit "locking/lockdep: Consolidate lock usage bit initialization" but does not
> include this patch.
>
> [1] https://lore.kernel.org/lkml/20190708093516.GA57558@xxxxxxxxx/
>
> On Mon, 2019-06-10 at 13:52 +0800, Yuyang Du wrote:
> > The commit:
> >
> > 091806515124b20 ("locking/lockdep: Consolidate lock usage bit initialization")
> >
> > misses marking the LOCK_USED flag at IRQ usage initialization when
> > CONFIG_TRACE_IRQFLAGS or CONFIG_PROVE_LOCKING is not defined. Fix it.
> >
> > Reported-by: Qian Cai <cai@xxxxxx>
> > Signed-off-by: Yuyang Du <duyuyang@xxxxxxxxx>
> > ---
> > kernel/locking/lockdep.c | 110 +++++++++++++++++++++++------------------------
> > 1 file changed, 53 insertions(+), 57 deletions(-)
> >
> > diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
> > index 48a840a..c3db987 100644
> > --- a/kernel/locking/lockdep.c
> > +++ b/kernel/locking/lockdep.c
> > @@ -3460,9 +3460,61 @@ void trace_softirqs_off(unsigned long ip)
> > debug_atomic_inc(redundant_softirqs_off);
> > }
> >
> > +static inline unsigned int task_irq_context(struct task_struct *task)
> > +{
> > + return 2 * !!task->hardirq_context + !!task->softirq_context;
> > +}
> > +
> > +static int separate_irq_context(struct task_struct *curr,
> > + struct held_lock *hlock)
> > +{
> > + unsigned int depth = curr->lockdep_depth;
> > +
> > + /*
> > + * Keep track of points where we cross into an interrupt context:
> > + */
> > + if (depth) {
> > + struct held_lock *prev_hlock;
> > +
> > + prev_hlock = curr->held_locks + depth-1;
> > + /*
> > + * If we cross into another context, reset the
> > + * hash key (this also prevents the checking and the
> > + * adding of the dependency to 'prev'):
> > + */
> > + if (prev_hlock->irq_context != hlock->irq_context)
> > + return 1;
> > + }
> > + return 0;
> > +}
> > +
> > +#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> > +
> > +static inline
> > +int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
> > + enum lock_usage_bit new_bit)
> > +{
> > + WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
> > + return 1;
> > +}
> > +
> > +static inline unsigned int task_irq_context(struct task_struct *task)
> > +{
> > + return 0;
> > +}
> > +
> > +static inline int separate_irq_context(struct task_struct *curr,
> > + struct held_lock *hlock)
> > +{
> > + return 0;
> > +}
> > +
> > +#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> > +
> > static int
> > mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
> > {
> > +#if defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING)
> > if (!check)
> > goto lock_used;
> >
> > @@ -3510,6 +3562,7 @@ void trace_softirqs_off(unsigned long ip)
> > }
> >
> > lock_used:
> > +#endif
> > /* mark it as used: */
> > if (!mark_lock(curr, hlock, LOCK_USED))
> > return 0;
> > @@ -3517,63 +3570,6 @@ void trace_softirqs_off(unsigned long ip)
> > return 1;
> > }
> >
> > -static inline unsigned int task_irq_context(struct task_struct *task)
> > -{
> > - return 2 * !!task->hardirq_context + !!task->softirq_context;
> > -}
> > -
> > -static int separate_irq_context(struct task_struct *curr,
> > - struct held_lock *hlock)
> > -{
> > - unsigned int depth = curr->lockdep_depth;
> > -
> > - /*
> > - * Keep track of points where we cross into an interrupt context:
> > - */
> > - if (depth) {
> > - struct held_lock *prev_hlock;
> > -
> > - prev_hlock = curr->held_locks + depth-1;
> > - /*
> > - * If we cross into another context, reset the
> > - * hash key (this also prevents the checking and the
> > - * adding of the dependency to 'prev'):
> > - */
> > - if (prev_hlock->irq_context != hlock->irq_context)
> > - return 1;
> > - }
> > - return 0;
> > -}
> > -
> > -#else /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> > -
> > -static inline
> > -int mark_lock_irq(struct task_struct *curr, struct held_lock *this,
> > - enum lock_usage_bit new_bit)
> > -{
> > - WARN_ON(1); /* Impossible innit? when we don't have TRACE_IRQFLAG */
> > - return 1;
> > -}
> > -
> > -static inline int
> > -mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)
> > -{
> > - return 1;
> > -}
> > -
> > -static inline unsigned int task_irq_context(struct task_struct *task)
> > -{
> > - return 0;
> > -}
> > -
> > -static inline int separate_irq_context(struct task_struct *curr,
> > - struct held_lock *hlock)
> > -{
> > - return 0;
> > -}
> > -
> > -#endif /* defined(CONFIG_TRACE_IRQFLAGS) && defined(CONFIG_PROVE_LOCKING) */
> > -
> > /*
> > * Mark a lock with a usage bit, and validate the state transition:
> > */