[PATCH] locking/lockdep: Zap lock classes even with lock debugging disabled

From: Bart Van Assche
Date: Tue Mar 26 2019 - 15:59:40 EST


Commit a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer
in use") changed the behavior of lockdep_free_key_range() from
unconditionally zapping lock classes into only zapping lock classes if
debug_locks == true. Not zapping lock classes when debug_locks is false
leaves dangling pointers in several lockdep data structures, e.g.
lock_class::name in the all_lock_classes list, and can hence cause
"cat /proc/lockdep" to crash with a NULL pointer dereference. Restore the
pre-v5.1 behavior of always zapping lock classes.

Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Will Deacon <will.deacon@xxxxxxx>
Cc: Waiman Long <longman@xxxxxxxxxx>
Cc: shenghui <shhuiw@xxxxxxxxxxx>
Reported-by: shenghui <shhuiw@xxxxxxxxxxx>
Fixes: a0b0fd53e1e6 ("locking/lockdep: Free lock classes that are no longer in use") # v5.1-rc1
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
kernel/locking/lockdep.c | 23 ++++++-----------------
1 file changed, 6 insertions(+), 17 deletions(-)
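
Note for reviewers: graph_lock() bails out and returns 0 as soon as
debug_locks has been cleared by an earlier lockdep report, so with the
pre-patch code the zap was skipped exactly in the case shenghui hit. The
userspace sketch below is not kernel code: the stand-in spinlock and the
zap_classes() helper are invented for illustration, and only the
graph_lock() control flow mirrors kernel/locking/lockdep.c. It shows the
difference between the two call patterns:

/*
 * Minimal userspace model of the bug -- not kernel code. The stand-in
 * spinlock and zap_classes() are invented for illustration; only the
 * graph_lock() control flow mirrors kernel/locking/lockdep.c.
 */
#include <stdbool.h>
#include <stdio.h>

static bool debug_locks;	/* false: cleared by an earlier splat */
static bool lockdep_lock;	/* stand-in for arch_spinlock_t */

static void arch_spin_lock(bool *l)   { *l = true; }
static void arch_spin_unlock(bool *l) { *l = false; }

/* Like the kernel's graph_lock(): fails once debug_locks is clear. */
static int graph_lock(void)
{
	arch_spin_lock(&lockdep_lock);
	if (!debug_locks) {
		arch_spin_unlock(&lockdep_lock);
		return 0;
	}
	return 1;
}

static void zap_classes(void)
{
	printf("lock classes zapped\n");
}

/* Pre-patch call pattern: the zap is silently skipped. */
static void free_key_range_old(void)
{
	if (!graph_lock())
		return;				/* stale entries are left behind */
	zap_classes();
	arch_spin_unlock(&lockdep_lock);	/* i.e. graph_unlock() */
}

/* Post-patch call pattern: take lockdep_lock directly, always zap. */
static void free_key_range_new(void)
{
	arch_spin_lock(&lockdep_lock);
	zap_classes();
	arch_spin_unlock(&lockdep_lock);
}

int main(void)
{
	free_key_range_old();	/* prints nothing */
	free_key_range_new();	/* prints "lock classes zapped" */
	return 0;
}

Building and running this prints a single "lock classes zapped" line,
from the post-patch variant only.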

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 34cdcbedda49..70480e4f8f5d 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -4689,8 +4689,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
+	arch_spin_lock(&lockdep_lock);
 
 	/* closed head */
 	pf = delayed_free.pf + (delayed_free.index ^ 1);
@@ -4702,8 +4701,7 @@ static void free_zapped_rcu(struct rcu_head *ch)
 	 */
 	call_rcu_zapped(delayed_free.pf + delayed_free.index);
 
-	graph_unlock();
-out_irq:
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 }

@@ -4744,21 +4742,15 @@ static void lockdep_free_key_range_reg(void *start, unsigned long size)
 {
 	struct pending_free *pf;
 	unsigned long flags;
-	int locked;
 
 	init_data_structures_once();
 
 	raw_local_irq_save(flags);
-	locked = graph_lock();
-	if (!locked)
-		goto out_irq;
-
+	arch_spin_lock(&lockdep_lock);
 	pf = get_pending_free();
 	__lockdep_free_key_range(pf, start, size);
 	call_rcu_zapped(pf);
-
-	graph_unlock();
-out_irq:
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 
 	/*
@@ -4911,9 +4903,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
 		return;
 
 	raw_local_irq_save(flags);
-	if (!graph_lock())
-		goto out_irq;
-
+	arch_spin_lock(&lockdep_lock);
 	pf = get_pending_free();
 	hlist_for_each_entry_rcu(k, hash_head, hash_entry) {
 		if (k == key) {
@@ -4925,8 +4915,7 @@ void lockdep_unregister_key(struct lock_class_key *key)
 	WARN_ON_ONCE(!found);
 	__lockdep_free_key_range(pf, key, 1);
 	call_rcu_zapped(pf);
-	graph_unlock();
-out_irq:
+	arch_spin_unlock(&lockdep_lock);
 	raw_local_irq_restore(flags);
 
 	/* Wait until is_dynamic_key() has finished accessing k->hash_entry. */
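
The replacement is the same at all three sites: interrupts are already
disabled by raw_local_irq_save(), and lockdep_lock is the same lock
graph_lock() takes internally, but taking it directly cannot fail the
way graph_lock() does once debug_locks has been cleared. The resulting
pattern, as a sketch extracted from the hunks above:

	raw_local_irq_save(flags);
	arch_spin_lock(&lockdep_lock);
	/* zap work that must run even when debug_locks == 0 */
	arch_spin_unlock(&lockdep_lock);
	raw_local_irq_restore(flags);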
--
2.21.0.196.g041f5ea1cf98