[PATCH v3 1/8] locking/lockdep: Decrement irq context counters when removing lock chain

From: Waiman Long
Date: Wed Jan 15 2020 - 16:43:41 EST


There are currently three counters that track the irq context of a lock
chain - nr_hardirq_chains, nr_softirq_chains and nr_process_chains.
They are incremented when a new lock chain is added, but they are not
decremented when a lock chain is removed. That causes some of the
statistics reported by /proc/lockdep_stats to become incorrect once
lock classes are zapped.

Fix that by decrementing the right counter when a lock chain is
removed. The irq context is already recorded in the chain's irq_context
field; give its encoding symbolic names (LOCK_CHAIN_HARDIRQ_CONTEXT and
LOCK_CHAIN_SOFTIRQ_CONTEXT) and have inc_chains()/dec_chains() decode
that field instead of peeking at current->hardirq_context and
current->softirq_context, which are meaningless at zap time. When
remove_class_from_lock_chain() re-adds a shrunken chain under a new
chain key, the counter for that chain's context is incremented again so
the counts stay balanced.

Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use")
Signed-off-by: Waiman Long <longman@xxxxxxxxxx>
---
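Note for reviewers, not part of the commit log: these counters back the
"in-hardirq chains:", "in-softirq chains:" and "in-process chains:"
lines of /proc/lockdep_stats, and the "combined max dependencies"
estimate is derived from them as well. From memory, lockdep_stats_show()
in kernel/locking/lockdep_proc.c prints them roughly like this (exact
field widths and format specifiers may differ):

	seq_printf(m, " in-hardirq chains:             %11u\n",
			nr_hardirq_chains);
	seq_printf(m, " in-softirq chains:             %11u\n",
			nr_softirq_chains);
	seq_printf(m, " in-process chains:             %11u\n",
			nr_process_chains);

Without the decrement, those counts only ever grow across class zapping.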
 kernel/locking/lockdep.c           | 36 ++++++++++++++++++++++++++++----------
 kernel/locking/lockdep_internals.h |  6 ++++++
 2 files changed, 32 insertions(+), 10 deletions(-)
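Also not part of the commit log: the fix depends on lock_chain.irq_context
carrying the same two-bit encoding that task_irq_context() computes, so
that dec_chains() at zap time sees exactly the value inc_chains() saw when
the chain was added, with hardirq taking precedence over softirq as
before. A minimal stand-alone sketch of that pairing invariant (plain
user-space C for illustration only, not kernel code):

#include <assert.h>
#include <stdio.h>

#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)

static int nr_hardirq_chains, nr_softirq_chains, nr_process_chains;

/* Mirrors the patched inc_chains(): the hardirq bit is checked first. */
static void inc_chains(int irq_context)
{
	if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
		nr_hardirq_chains++;
	else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
		nr_softirq_chains++;
	else
		nr_process_chains++;
}

/* Exact mirror image of inc_chains(). */
static void dec_chains(int irq_context)
{
	if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
		nr_hardirq_chains--;
	else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
		nr_softirq_chains--;
	else
		nr_process_chains--;
}

int main(void)
{
	/* The four values task_irq_context() can produce. */
	int contexts[] = {
		0,					/* process context */
		LOCK_CHAIN_SOFTIRQ_CONTEXT,		/* softirq */
		LOCK_CHAIN_HARDIRQ_CONTEXT,		/* hardirq */
		LOCK_CHAIN_HARDIRQ_CONTEXT | LOCK_CHAIN_SOFTIRQ_CONTEXT,
	};

	for (unsigned int i = 0; i < sizeof(contexts) / sizeof(contexts[0]); i++) {
		inc_chains(contexts[i]);	/* chain added */
		dec_chains(contexts[i]);	/* chain zapped */
	}

	/* Paired inc/dec leaves every counter at zero. */
	assert(nr_hardirq_chains == 0);
	assert(nr_softirq_chains == 0);
	assert(nr_process_chains == 0);
	printf("counters balanced\n");
	return 0;
}

remove_class_from_lock_chain() follows the same pairing: dec_chains()
when a chain's key is rewritten and the chain unhashed, inc_chains()
again if the shrunken chain is re-added under its new key.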

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 32282e7112d3..b20fa6236b2a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -2299,16 +2299,24 @@ static int check_irq_usage(struct task_struct *curr, struct held_lock *prev,
 	return 0;
 }
 
-static void inc_chains(void)
+static void inc_chains(int irq_context)
 {
-	if (current->hardirq_context)
+	if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
 		nr_hardirq_chains++;
-	else {
-		if (current->softirq_context)
-			nr_softirq_chains++;
-		else
-			nr_process_chains++;
-	}
+	else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
+		nr_softirq_chains++;
+	else
+		nr_process_chains++;
+}
+
+static void dec_chains(int irq_context)
+{
+	if (irq_context & LOCK_CHAIN_HARDIRQ_CONTEXT)
+		nr_hardirq_chains--;
+	else if (irq_context & LOCK_CHAIN_SOFTIRQ_CONTEXT)
+		nr_softirq_chains--;
+	else
+		nr_process_chains--;
 }

#else
@@ -2324,6 +2332,10 @@ static inline void inc_chains(void)
 	nr_process_chains++;
 }
 
+static inline void dec_chains(int irq_context)
+{
+	nr_process_chains--;
+}
 #endif /* CONFIG_TRACE_IRQFLAGS */
 
 static void
@@ -2844,7 +2856,7 @@ static inline int add_chain_cache(struct task_struct *curr,

 	hlist_add_head_rcu(&chain->entry, hash_head);
 	debug_atomic_inc(chain_lookup_misses);
-	inc_chains();
+	inc_chains(chain->irq_context);
 
 	return 1;
 }
@@ -3597,7 +3609,8 @@ mark_usage(struct task_struct *curr, struct held_lock *hlock, int check)

 static inline unsigned int task_irq_context(struct task_struct *task)
 {
-	return 2 * !!task->hardirq_context + !!task->softirq_context;
+	return LOCK_CHAIN_HARDIRQ_CONTEXT * !!task->hardirq_context +
+	       LOCK_CHAIN_SOFTIRQ_CONTEXT * !!task->softirq_context;
 }
 
 static int separate_irq_context(struct task_struct *curr,
@@ -4799,6 +4812,8 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
 		return;
 	/* Overwrite the chain key for concurrent RCU readers. */
 	WRITE_ONCE(chain->chain_key, chain_key);
+	dec_chains(chain->irq_context);
+
 	/*
 	 * Note: calling hlist_del_rcu() from inside a
 	 * hlist_for_each_entry_rcu() loop is safe.
@@ -4820,6 +4835,7 @@ static void remove_class_from_lock_chain(struct pending_free *pf,
 	}
 	*new_chain = *chain;
 	hlist_add_head_rcu(&new_chain->entry, chainhashentry(chain_key));
+	inc_chains(new_chain->irq_context);
 #endif
}

diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 18d85aebbb57..53500a1dac58 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -98,6 +98,12 @@ static const unsigned long LOCKF_USED_IN_IRQ_READ =

 #define MAX_LOCKDEP_CHAINS_BITS	16
 
+/*
+ * Bit definitions for lock_chain.irq_context
+ */
+#define LOCK_CHAIN_SOFTIRQ_CONTEXT	(1 << 0)
+#define LOCK_CHAIN_HARDIRQ_CONTEXT	(1 << 1)
+
 /*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the hash_lock.
--
2.18.1