[patch V2 21/24] jump_label: Pull get_online_cpus() into generic code
From: Thomas Gleixner
Date: Tue Apr 18 2017 - 15:51:28 EST
This change does two things:
- it moves the get_online_cpus() call into generic code, with the aim of
later providing some static_key ops that avoid it.
- as a side effect it inverts the lock order between cpu_hotplug_lock and
jump_label_mutex.
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: jbaron@xxxxxxxxxx
Cc: bigeasy@xxxxxxxxxxxxx
Cc: rostedt@xxxxxxxxxxx
Link: http://lkml.kernel.org/r/20170418103422.590118425@xxxxxxxxxxxxx
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
arch/mips/kernel/jump_label.c | 2 --
arch/sparc/kernel/jump_label.c | 2 --
arch/tile/kernel/jump_label.c | 2 --
arch/x86/kernel/jump_label.c | 2 --
kernel/jump_label.c | 14 ++++++++++++++
5 files changed, 14 insertions(+), 8 deletions(-)
--- a/arch/mips/kernel/jump_label.c
+++ b/arch/mips/kernel/jump_label.c
@@ -58,7 +58,6 @@ void arch_jump_label_transform(struct ju
insn.word = 0; /* nop */
}
- get_online_cpus();
mutex_lock(&text_mutex);
if (IS_ENABLED(CONFIG_CPU_MICROMIPS)) {
insn_p->halfword[0] = insn.word >> 16;
@@ -70,7 +69,6 @@ void arch_jump_label_transform(struct ju
(unsigned long)insn_p + sizeof(*insn_p));
mutex_unlock(&text_mutex);
- put_online_cpus();
}
#endif /* HAVE_JUMP_LABEL */
--- a/arch/sparc/kernel/jump_label.c
+++ b/arch/sparc/kernel/jump_label.c
@@ -41,12 +41,10 @@ void arch_jump_label_transform(struct ju
val = 0x01000000;
}
- get_online_cpus();
mutex_lock(&text_mutex);
*insn = val;
flushi(insn);
mutex_unlock(&text_mutex);
- put_online_cpus();
}
#endif
--- a/arch/tile/kernel/jump_label.c
+++ b/arch/tile/kernel/jump_label.c
@@ -45,14 +45,12 @@ static void __jump_label_transform(struc
void arch_jump_label_transform(struct jump_entry *e,
enum jump_label_type type)
{
- get_online_cpus();
mutex_lock(&text_mutex);
__jump_label_transform(e, type);
flush_icache_range(e->code, e->code + sizeof(tilegx_bundle_bits));
mutex_unlock(&text_mutex);
- put_online_cpus();
}
__init_or_module void arch_jump_label_transform_static(struct jump_entry *e,
--- a/arch/x86/kernel/jump_label.c
+++ b/arch/x86/kernel/jump_label.c
@@ -105,11 +105,9 @@ static void __jump_label_transform(struc
void arch_jump_label_transform(struct jump_entry *entry,
enum jump_label_type type)
{
- get_online_cpus();
mutex_lock(&text_mutex);
__jump_label_transform(entry, type, NULL, 0);
mutex_unlock(&text_mutex);
- put_online_cpus();
}
static enum {
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -15,6 +15,7 @@
#include <linux/static_key.h>
#include <linux/jump_label_ratelimit.h>
#include <linux/bug.h>
+#include <linux/cpu.h>
#ifdef HAVE_JUMP_LABEL
@@ -124,6 +125,12 @@ void static_key_slow_inc(struct static_k
return;
}
+ /*
+ * A number of architectures need to synchronize I$ across
+ * all the CPUs; for that to be serialized against CPU hot-plug
+ * we need to avoid CPUs coming online.
+ */
+ get_online_cpus();
jump_label_lock();
if (atomic_read(&key->enabled) == 0) {
atomic_set(&key->enabled, -1);
@@ -133,6 +140,7 @@ void static_key_slow_inc(struct static_k
atomic_inc(&key->enabled);
}
jump_label_unlock();
+ put_online_cpus();
}
EXPORT_SYMBOL_GPL(static_key_slow_inc);
@@ -146,6 +154,7 @@ static void __static_key_slow_dec(struct
* returns is unbalanced, because all other static_key_slow_inc()
* instances block while the update is in progress.
*/
+ get_online_cpus();
if (!atomic_dec_and_mutex_lock(&key->enabled, &jump_label_mutex)) {
WARN(atomic_read(&key->enabled) < 0,
"jump label: negative count!\n");
@@ -159,6 +168,7 @@ static void __static_key_slow_dec(struct
jump_label_update(key);
}
jump_label_unlock();
+ put_online_cpus();
}
static void jump_label_update_timeout(struct work_struct *work)
@@ -592,6 +602,10 @@ jump_label_module_notify(struct notifier
switch (val) {
case MODULE_STATE_COMING:
+ /*
+ * XXX do we need get_online_cpus() ? the module isn't
+ * executable yet, so nothing should be looking at our code.
+ */
jump_label_lock();
ret = jump_label_add_module(mod);
if (ret) {