[PATCH 2/2] locking/lockdep: Track number of zapped classes & report abuse

From: Waiman Long
Date: Thu Nov 29 2018 - 17:42:01 EST


When a kernel module is unloaded, all the lock classes associated with
that module are zapped. Unfortunately, the corresponding lockdep
entries in stack_trace[], list_entries[], lock_classes[], lock_chains[]
and chain_hlocks[] cannot be reused without greatly complicating the
existing code.

As a result, an application that performs repeated kernel module load
and unload operations may exhaust all the entries in one of the lockdep
arrays, leading to a bug message like the one shown below. Lockdep
cannot currently support this particular use case, and we need to make
users aware of that.
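
For reference, these bug messages are emitted through
print_lockdep_off(), which also disables lockdep, so the dmesg output
looks roughly like this (the array name depends on which table was
exhausted first):

  BUG: MAX_LOCKDEP_ENTRIES too low!
  turning off the locking correctness validator.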

The number of zapped lock classes is now tracked. If one of the
lockdep arrays runs out of entries and the number of zapped classes is
a significant portion (at least 1/4) of the total, a warning message is
printed to notify users that repeated kernel module load and unload
operations cannot be supported.
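
To illustrate the threshold, below is a minimal userspace sketch of the
new check (the comparison mirrors check_zapped_classes() in the patch;
the counter values in main() are made up to show the boundary at which
the warning fires):

  #include <stdio.h>

  static unsigned long nr_lock_classes;
  static unsigned long nr_zapped_classes;

  /* same comparison as the kernel-side check_zapped_classes() */
  static void check_zapped_classes(void)
  {
          if (nr_zapped_classes < nr_lock_classes / 4)
                  return;
          printf("WARNING: %lu out of %lu locks have been destroyed\n",
                 nr_zapped_classes, nr_lock_classes);
  }

  int main(void)
  {
          nr_lock_classes = 8192;   /* e.g. MAX_LOCKDEP_KEYS */
          nr_zapped_classes = 2047; /* just under 1/4 - stays silent */
          check_zapped_classes();

          nr_zapped_classes = 2048; /* exactly 1/4 - warning fires */
          check_zapped_classes();
          return 0;
  }

The current count of zapped classes is also exported through the new
" zapped locks:" line in /proc/lockdep_stats (see the lockdep_proc.c
hunk below).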

Signed-off-by: Waiman Long <longman@xxxxxxxxxx>
---
 kernel/locking/lockdep.c           | 26 ++++++++++++++++++++++++++
 kernel/locking/lockdep_internals.h |  1 +
 kernel/locking/lockdep_proc.c      |  2 ++
 3 files changed, 29 insertions(+)

diff --git a/kernel/locking/lockdep.c b/kernel/locking/lockdep.c
index 9e0d36b..7136da4 100644
--- a/kernel/locking/lockdep.c
+++ b/kernel/locking/lockdep.c
@@ -138,6 +138,7 @@ static inline int debug_locks_off_graph_unlock(void)
  * get freed - this significantly simplifies the debugging code.
  */
 unsigned long nr_lock_classes;
+unsigned long nr_zapped_classes;
 struct lock_class lock_classes[MAX_LOCKDEP_KEYS];
 
 static inline struct lock_class *hlock_class(struct held_lock *hlock)
@@ -372,6 +373,25 @@ static int verbose(struct lock_class *class)
 }
 
 /*
+ * Check the number of zapped classes and print a warning if it is
+ * at least 1/4 of the total.
+ */
+static void check_zapped_classes(void)
+{
+	if (nr_zapped_classes < nr_lock_classes / 4)
+		return;
+	pr_warn("========================================================\n");
+	pr_warn("WARNING: %lu out of %lu locks have been destroyed\n",
+		nr_zapped_classes, nr_lock_classes);
+	pr_warn("through kernel module unload operations.\n");
+	pr_warn("The corresponding lockdep entries are not reusable.\n");
+	pr_warn("The system might have run out of lockdep entries because\n");
+	pr_warn("of repeated kernel module load and unload operations.\n");
+	pr_warn("Lockdep cannot support this particular use case.\n");
+	pr_warn("--------------------------------------------------------\n");
+}
+
+/*
  * Stack-trace: tightly packed array of stack backtrace
  * addresses. Protected by the graph_lock.
  */
@@ -417,6 +437,7 @@ static int save_trace(struct stack_trace *trace)
 			return 0;
 
 		print_lockdep_off("BUG: MAX_STACK_TRACE_ENTRIES too low!");
+		check_zapped_classes();
 		dump_stack();
 
 		return 0;
@@ -781,6 +802,7 @@ static bool assign_lock_key(struct lockdep_map *lock)
 		}
 
 		print_lockdep_off("BUG: MAX_LOCKDEP_KEYS too low!");
+		check_zapped_classes();
 		dump_stack();
 		return NULL;
 	}
@@ -847,6 +869,7 @@ static struct lock_list *alloc_list_entry(void)
 			return NULL;
 
 		print_lockdep_off("BUG: MAX_LOCKDEP_ENTRIES too low!");
+		check_zapped_classes();
 		dump_stack();
 		return NULL;
 	}
@@ -2183,6 +2206,7 @@ static inline int add_chain_cache(struct task_struct *curr,
 			return 0;
 
 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAINS too low!");
+		check_zapped_classes();
 		dump_stack();
 		return 0;
 	}
@@ -2217,6 +2241,7 @@ static inline int add_chain_cache(struct task_struct *curr,
 			return 0;
 
 		print_lockdep_off("BUG: MAX_LOCKDEP_CHAIN_HLOCKS too low!");
+		check_zapped_classes();
 		dump_stack();
 		return 0;
 	}
@@ -4146,6 +4171,7 @@ static void zap_class(struct lock_class *class)

 	RCU_INIT_POINTER(class->key, NULL);
 	RCU_INIT_POINTER(class->name, NULL);
+	nr_zapped_classes++;
 }
 
 static inline int within(const void *addr, void *start, unsigned long size)
diff --git a/kernel/locking/lockdep_internals.h b/kernel/locking/lockdep_internals.h
index 88c847a..e719d3d 100644
--- a/kernel/locking/lockdep_internals.h
+++ b/kernel/locking/lockdep_internals.h
@@ -95,6 +95,7 @@ extern void get_usage_chars(struct lock_class *class,
 struct lock_class *lock_chain_get_class(struct lock_chain *chain, int i);
 
 extern unsigned long nr_lock_classes;
+extern unsigned long nr_zapped_classes;
 extern unsigned long nr_list_entries;
 extern unsigned long nr_lock_chains;
 extern int nr_chain_hlocks;
diff --git a/kernel/locking/lockdep_proc.c b/kernel/locking/lockdep_proc.c
index 3d31f9b..04823e6 100644
--- a/kernel/locking/lockdep_proc.c
+++ b/kernel/locking/lockdep_proc.c
@@ -314,6 +314,8 @@ static int lockdep_stats_show(struct seq_file *m, void *v)
 	seq_printf(m, " irq-read-unsafe locks:        %11lu\n",
 			nr_irq_read_unsafe);
 
+	seq_printf(m, " zapped locks:                 %11lu\n",
+			nr_zapped_classes);
 	seq_printf(m, " uncategorized locks:          %11lu\n",
 			nr_uncategorized);
 	seq_printf(m, " unused locks:                 %11lu\n",
--
1.8.3.1