[PATCH 1/8] module: Sanitize RCU usage and locking
From: Peter Zijlstra
Date: Wed Mar 18 2015 - 09:56:20 EST
Currently the RCU usage in module.c is an inconsistent mess of RCU and
RCU-sched; this is broken for CONFIG_PREEMPT, where synchronize_rcu()
does not imply synchronize_sched().
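
Roughly, the mismatch looks like this (a sketch only, not literal
module.c code; mod and addr are placeholders):

	/* Reader side, as most of module.c has it: RCU-sched. */
	preempt_disable();
	mod = __module_address(addr);	/* walks the modules list */
	/* ... dereference mod ... */
	preempt_enable();

	/* Updater side, as it is today: plain RCU. */
	list_del_rcu(&mod->list);
	synchronize_rcu();	/* with CONFIG_PREEMPT this only waits for
				 * rcu_read_lock() readers, not for the
				 * preempt-disabled reader above */
	kfree(mod);		/* reader may still be using mod here */
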
Most usage sites use preempt_{dis,en}able(), which is RCU-sched, but
(most of) the modification sites use synchronize_rcu(). The one
exception is the module bug list, whose readers actually use RCU.
Convert everything over to RCU-sched.
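
After the conversion, readers and updaters agree on RCU-sched; again
only a sketch:

	/* Readers keep using preempt_disable() / rcu_read_lock_sched(). */
	rcu_read_lock_sched();
	/* ... walk the modules list or module_bug_list ... */
	rcu_read_unlock_sched();

	/* Updaters switch to the matching grace period primitives. */
	list_del_rcu(&mod->list);
	synchronize_sched();	/* or call_rcu_sched() where waiting
				 * synchronously is too slow */
	kfree(mod);
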
Furthermore, add lockdep asserts to all sites, because it's not at all
clear to me that the required locking is observed, especially on
exported functions.
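
For the exported lookup functions the rule being asserted is roughly
the following ("some_symbol" and the locals are just placeholders):

	const struct kernel_symbol *sym;
	struct module *owner;
	const unsigned long *crc;

	/* Either hold module_mutex across the lookup and the use: */
	mutex_lock(&module_mutex);
	sym = find_symbol("some_symbol", &owner, &crc, true, true);
	/* ... use sym, owner ... */
	mutex_unlock(&module_mutex);

	/* or pin an RCU-sched read section around both: */
	preempt_disable();
	sym = find_symbol("some_symbol", &owner, &crc, true, true);
	/* ... use sym, owner ... */
	preempt_enable();
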
Cc: Rusty Russell <rusty@xxxxxxxxxxxxxxx>
Acked-by: "Paul E. McKenney" <paulmck@xxxxxxxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
include/linux/module.h | 12 ++++++++++--
kernel/module.c | 42 ++++++++++++++++++++++++++++++++++--------
lib/bug.c | 7 +++++--
3 files changed, 49 insertions(+), 12 deletions(-)
--- a/include/linux/module.h
+++ b/include/linux/module.h
@@ -415,14 +415,22 @@ struct symsearch {
bool unused;
};
-/* Search for an exported symbol by name. */
+/*
+ * Search for an exported symbol by name.
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
const struct kernel_symbol *find_symbol(const char *name,
struct module **owner,
const unsigned long **crc,
bool gplok,
bool warn);
-/* Walk the exported symbol table */
+/*
+ * Walk the exported symbol table
+ *
+ * Must be called with module_mutex held or preemption disabled.
+ */
bool each_symbol_section(bool (*fn)(const struct symsearch *arr,
struct module *owner,
void *data), void *data);
--- a/kernel/module.c
+++ b/kernel/module.c
@@ -106,6 +106,24 @@ static LIST_HEAD(modules);
struct list_head *kdb_modules = &modules; /* kdb needs the list of modules */
#endif /* CONFIG_KGDB_KDB */
+static void module_assert_mutex(void)
+{
+ lockdep_assert_held(&module_mutex);
+}
+
+static void module_assert_mutex_or_preempt(void)
+{
+#ifdef CONFIG_LOCKDEP
+ int rcu_held = rcu_read_lock_sched_held();
+ int mutex_held = 1;
+
+ if (debug_locks)
+ mutex_held = lockdep_is_held(&module_mutex);
+
+ WARN_ON(!rcu_held && !mutex_held);
+#endif
+}
+
#ifdef CONFIG_MODULE_SIG
#ifdef CONFIG_MODULE_SIG_FORCE
static bool sig_enforce = true;
@@ -319,6 +337,8 @@ bool each_symbol_section(bool (*fn)(cons
#endif
};
+ module_assert_mutex_or_preempt();
+
if (each_symbol_in_section(arr, ARRAY_SIZE(arr), NULL, fn, data))
return true;
@@ -458,6 +478,8 @@ static struct module *find_module_all(co
{
struct module *mod;
+ module_assert_mutex();
+
list_for_each_entry(mod, &modules, list) {
if (!even_unformed && mod->state == MODULE_STATE_UNFORMED)
continue;
@@ -1856,8 +1878,8 @@ static void free_module(struct module *m
list_del_rcu(&mod->list);
/* Remove this module from bug list, this uses list_del_rcu */
module_bug_cleanup(mod);
- /* Wait for RCU synchronizing before releasing mod->list and buglist. */
- synchronize_rcu();
+ /* Wait for RCU-sched synchronizing before releasing mod->list and buglist. */
+ synchronize_sched();
mutex_unlock(&module_mutex);
/* This may be NULL, but that's OK */
@@ -3106,11 +3128,11 @@ static noinline int do_init_module(struc
mod->init_text_size = 0;
/*
* We want to free module_init, but be aware that kallsyms may be
- * walking this with preempt disabled. In all the failure paths,
- * we call synchronize_rcu/synchronize_sched, but we don't want
- * to slow down the success path, so use actual RCU here.
+ * walking this with preempt disabled. In all the failure paths, we
+ * call synchronize_sched, but we don't want to slow down the success
+ * path, so use actual RCU here.
*/
- call_rcu(&freeinit->rcu, do_free_init);
+ call_rcu_sched(&freeinit->rcu, do_free_init);
mutex_unlock(&module_mutex);
wake_up_all(&module_wq);
@@ -3368,8 +3390,8 @@ static int load_module(struct load_info
/* Unlink carefully: kallsyms could be walking list. */
list_del_rcu(&mod->list);
wake_up_all(&module_wq);
- /* Wait for RCU synchronizing before releasing mod->list. */
- synchronize_rcu();
+ /* Wait for RCU-sched synchronizing before releasing mod->list. */
+ synchronize_sched();
mutex_unlock(&module_mutex);
free_module:
/* Free lock-classes; relies on the preceding sync_rcu() */
@@ -3636,6 +3658,8 @@ int module_kallsyms_on_each_symbol(int (
unsigned int i;
int ret;
+ module_assert_mutex();
+
list_for_each_entry(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
@@ -3810,6 +3834,8 @@ struct module *__module_address(unsigned
if (addr < module_addr_min || addr > module_addr_max)
return NULL;
+ module_assert_mutex_or_preempt();
+
list_for_each_entry_rcu(mod, &modules, list) {
if (mod->state == MODULE_STATE_UNFORMED)
continue;
--- a/lib/bug.c
+++ b/lib/bug.c
@@ -66,7 +66,7 @@ static const struct bug_entry *module_fi
struct module *mod;
const struct bug_entry *bug = NULL;
- rcu_read_lock();
+ rcu_read_lock_sched();
list_for_each_entry_rcu(mod, &module_bug_list, bug_list) {
unsigned i;
@@ -77,7 +77,7 @@ static const struct bug_entry *module_fi
}
bug = NULL;
out:
- rcu_read_unlock();
+ rcu_read_unlock_sched();
return bug;
}
@@ -88,6 +88,8 @@ void module_bug_finalize(const Elf_Ehdr
char *secstrings;
unsigned int i;
+ lockdep_assert_held(&module_mutex);
+
mod->bug_table = NULL;
mod->num_bugs = 0;
@@ -113,6 +115,7 @@ void module_bug_finalize(const Elf_Ehdr
void module_bug_cleanup(struct module *mod)
{
+ lockdep_assert_held(&module_mutex);
list_del_rcu(&mod->bug_list);
}
--