[PATCH v2 18/24] locking/lockdep: Reuse list entries that are no longer in use
From: Bart Van Assche
Date: Mon Dec 03 2018 - 19:29:37 EST
Instead of abandoning elements of list_entries[] that are no longer in
use, make alloc_list_entry() reuse array elements that have been freed.
Keep track of allocated elements in the new bitmap list_entries_in_use.
zap_class() only marks the entries it removes in the second new bitmap,
list_entries_being_freed; these entries become available for reuse once
free_zapped_classes() has run as an RCU callback, i.e. only after every
RCU reader that might still be traversing them has finished.
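
The allocate / mark / reuse cycle is easiest to see in isolation. The
following is a minimal userspace sketch of the same two-bitmap scheme,
not kernel code: the helper names (slot_alloc(), slot_mark_freed(),
slots_gc()) are invented for this example, plain bool arrays stand in
for the kernel bitmaps, and the RCU grace period is reduced to an
explicit slots_gc() call:

#include <stdbool.h>
#include <stdio.h>

#define NR_SLOTS 8

/* Stand-ins for list_entries_in_use and list_entries_being_freed. */
static bool in_use[NR_SLOTS];
static bool being_freed[NR_SLOTS];

/* Models alloc_list_entry(): hand out the first slot not in use. */
static int slot_alloc(void)
{
        int i;

        for (i = 0; i < NR_SLOTS; i++) {
                if (!in_use[i]) {
                        in_use[i] = true;
                        return i;
                }
        }
        return -1; /* table exhausted */
}

/*
 * Models zap_class(): the slot is only marked; it stays in use so
 * that it cannot be handed out again while readers may still be
 * traversing it.
 */
static void slot_mark_freed(int i)
{
        being_freed[i] = true;
}

/*
 * Models free_zapped_classes(): after the (simulated) grace period,
 * marked slots become available for reuse.
 */
static void slots_gc(void)
{
        int i;

        for (i = 0; i < NR_SLOTS; i++) {
                if (being_freed[i]) {
                        in_use[i] = false;
                        being_freed[i] = false;
                }
        }
}

int main(void)
{
        int a = slot_alloc();   /* 0 */
        int b = slot_alloc();   /* 1 */

        slot_mark_freed(a);
        printf("%d\n", slot_alloc());   /* 2: slot 0 not reusable yet */
        slots_gc();
        printf("%d\n", slot_alloc());   /* 0: slot 0 is reused now */
        (void)b;
        return 0;
}

In the patch itself the last step is never called directly: the bits
are only cleared from free_zapped_classes(), which runs as an RCU
callback and hence after a grace period.
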
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Waiman Long <longman@xxxxxxxxxx>
Cc: Johannes Berg <johannes@xxxxxxxxxxxxxxxx>
Signed-off-by: Bart Van Assche <bvanassche@xxxxxxx>
---
kernel/locking/lockdep.c | 27 ++++++++++++++++++++-------
1 file changed, 20 insertions(+), 7 deletions(-)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index d907d8bfefdf..f343e7612a3a 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -130,6 +130,8 @@ static inline int debug_locks_off_graph_unlock(void)
unsigned long nr_list_entries;
static struct lock_list list_entries[MAX_LOCKDEP_ENTRIES];
+static DECLARE_BITMAP(list_entries_in_use, MAX_LOCKDEP_ENTRIES);
+static DECLARE_BITMAP(list_entries_being_freed, MAX_LOCKDEP_ENTRIES);

/*
* All data structures here are protected by the global debug_lock.
*
@@ -871,7 +873,10 @@ register_lock_class(struct lockdep_map *lock, unsigned int subclass, int force)
*/
static struct lock_list *alloc_list_entry(void)
{
- if (nr_list_entries >= MAX_LOCKDEP_ENTRIES) {
+ int idx = find_first_zero_bit(list_entries_in_use,
+ ARRAY_SIZE(list_entries));
+
+ if (idx >= ARRAY_SIZE(list_entries)) {
if (!debug_locks_off_graph_unlock())
return NULL;

@@ -879,7 +884,8 @@ static struct lock_list *alloc_list_entry(void)
dump_stack();
return NULL;
}
- return list_entries + nr_list_entries++;
+ __set_bit(idx, list_entries_in_use);
+ return list_entries + idx;
}

/*
@@ -984,7 +990,7 @@ static inline void mark_lock_accessed(struct lock_list *lock,
unsigned long nr;

nr = lock - list_entries;
- WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+ WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
lock->parent = parent;
lock->class->dep_gen_id = lockdep_dependency_gen_id;
}
@@ -994,7 +1000,7 @@ static inline unsigned long lock_accessed(struct lock_list *lock)
unsigned long nr;

nr = lock - list_entries;
- WARN_ON(nr >= nr_list_entries); /* Out-of-bounds, input fail */
+ WARN_ON(nr >= ARRAY_SIZE(list_entries)); /* Out-of-bounds, input fail */
return lock->class->dep_gen_id == lockdep_dependency_gen_id;
}

@@ -4250,9 +4256,12 @@ static void zap_class(struct lock_class *class)
* Remove all dependencies this lock is
* involved in:
*/
- for (i = 0, entry = list_entries; i < nr_list_entries; i++, entry++) {
+ for_each_set_bit(i, list_entries_in_use, ARRAY_SIZE(list_entries)) {
+ entry = list_entries + i;
if (entry->class != class && entry->links_to != class)
continue;
+ if (__test_and_set_bit(i, list_entries_being_freed))
+ continue;
links_to = entry->links_to;
WARN_ON_ONCE(entry->class == links_to);
list_del_rcu(&entry->entry);
@@ -4286,8 +4295,9 @@ static inline int within(const void *addr, void *start, unsigned long size)
}

/*
- * Free all lock classes that are on the zapped_classes list. Called as an
- * RCU callback function.
+ * Free all lock classes that are on the zapped_classes list and also all list
+ * entries that have been marked as being freed. Called as an RCU callback
+ * function.
*/
static void free_zapped_classes(struct callback_head *ch)
{
@@ -4303,6 +4313,9 @@ static void free_zapped_classes(struct callback_head *ch)
nr_lock_classes--;
}
list_splice_init(&zapped_classes, &free_lock_classes);
+ bitmap_andnot(list_entries_in_use, list_entries_in_use,
+ list_entries_being_freed, ARRAY_SIZE(list_entries));
+ bitmap_clear(list_entries_being_freed, 0, ARRAY_SIZE(list_entries));
if (locked)
graph_unlock();
raw_local_irq_restore(flags);
--
2.20.0.rc1.387.gf8505762e3-goog