[PATCH 1/5] jump_label: add queueing API for batched static key updates
From: Jim Cromie
Date: Fri Apr 03 2026 - 10:24:15 EST
Introduce a new queueing API for static keys to allow batching of
instruction updates. This is provided for subsystems like
dynamic-debug, which may toggle thousands of static keys in a single
operation. Currently, this suffers from O(N) overhead due to repeated
machine-wide `stop_machine` or `text_poke_sync` synchronizations.
The new API includes:
- static_key_enable_queued(key)
- static_key_disable_queued(key)
- static_key_apply_queued() (the global synchronization barrier)
- Corresponding static_branch_*_queued(x) macros.
The _queued functionality is achieved by adding a 'bool apply'
parameter to 3 existing midlayer static functions, and using it to
decide whether or not to call arch_jump_label_transform_apply(). By
not explicitly calling it, we allow the transform queue to fill first.
This amortizes the cost of the IPI sync to 1/N (N=256 on x86).
1. __jump_label_update(struct static_key *key, bool apply)
this implements the if (apply) arch_jump_label_transform_apply();
2. __jump_label_mod_update(struct static_key *key, bool apply)
extends _queued feature to modules' static-keys
3. rename jump_label_update(struct static_key *key)
   to jump_label_update_key(struct static_key *key, bool apply)
and wrap it with 1-liners:
jump_label_update() & jump_label_update_queued()
On architectures supporting HAVE_JUMP_LABEL_BATCH, the _queued variants
lazily fill the architecture's patch queue.
It is safe to mix queued and immediate updates; any non-queued
static_key_enable/disable call will effectively "flush" any previously
queued changes as a side effect. Or just call static_key_apply_queued().
On architectures that do not need a patch queue (e.g., those with atomic
patching) or that do not support batching, the API transparently falls back
to immediate patching.
Signed-off-by: Jim Cromie <jim.cromie@xxxxxxxxx>
---
include/linux/jump_label.h | 18 +++++++
kernel/jump_label.c | 104 +++++++++++++++++++++++++++++--------
2 files changed, 100 insertions(+), 22 deletions(-)
diff --git a/include/linux/jump_label.h b/include/linux/jump_label.h
index fdb79dd1ebd8..283ee9360026 100644
--- a/include/linux/jump_label.h
+++ b/include/linux/jump_label.h
@@ -234,6 +234,9 @@ extern void static_key_slow_dec_cpuslocked(struct static_key *key);
extern int static_key_count(struct static_key *key);
extern void static_key_enable(struct static_key *key);
extern void static_key_disable(struct static_key *key);
+extern void static_key_enable_queued(struct static_key *key);
+extern void static_key_disable_queued(struct static_key *key);
+extern void static_key_apply_queued(void);
extern void static_key_enable_cpuslocked(struct static_key *key);
extern void static_key_disable_cpuslocked(struct static_key *key);
extern enum jump_label_type jump_label_init_type(struct jump_entry *entry);
@@ -340,6 +343,18 @@ static inline void static_key_disable(struct static_key *key)
atomic_set(&key->enabled, 0);
}
+static inline void static_key_enable_queued(struct static_key *key)
+{
+ static_key_enable(key);
+}
+
+static inline void static_key_disable_queued(struct static_key *key)
+{
+ static_key_disable(key);
+}
+
+static inline void static_key_apply_queued(void) {}
+
#define static_key_enable_cpuslocked(k) static_key_enable((k))
#define static_key_disable_cpuslocked(k) static_key_disable((k))
@@ -535,6 +550,9 @@ extern bool ____wrong_branch_error(void);
#define static_branch_enable(x) static_key_enable(&(x)->key)
#define static_branch_disable(x) static_key_disable(&(x)->key)
+#define static_branch_enable_queued(x) static_key_enable_queued(&(x)->key)
+#define static_branch_disable_queued(x) static_key_disable_queued(&(x)->key)
+#define static_branch_apply_queued() static_key_apply_queued()
#define static_branch_enable_cpuslocked(x) static_key_enable_cpuslocked(&(x)->key)
#define static_branch_disable_cpuslocked(x) static_key_disable_cpuslocked(&(x)->key)
diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 7cb19e601426..9826aefd77e5 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -91,6 +91,7 @@ jump_label_sort_entries(struct jump_entry *start, struct jump_entry *stop)
}
static void jump_label_update(struct static_key *key);
+static void jump_label_update_queued(struct static_key *key);
/*
* There are similar definitions for the !CONFIG_JUMP_LABEL case in jump_label.h.
@@ -250,10 +251,57 @@ void static_key_disable(struct static_key *key)
}
EXPORT_SYMBOL_GPL(static_key_disable);
+void static_key_enable_queued(struct static_key *key)
+{
+ STATIC_KEY_CHECK_USE(key);
+
+ if (atomic_read(&key->enabled) > 0) {
+ /*
+ * already enabled, don't act.
+ * warn if static-key-inc was used.
+ */
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 1);
+ return;
+ }
+
+ scoped_guard(jump_label_lock) {
+ if (atomic_read(&key->enabled) == 0) {
+ /*
+ * set transitional value, update,
+ * then set stable enabled state
+ */
+ atomic_set(&key->enabled, -1);
+ jump_label_update_queued(key);
+ atomic_set_release(&key->enabled, 1);
+ }
+ }
+}
+
+void static_key_disable_queued(struct static_key *key)
+{
+ STATIC_KEY_CHECK_USE(key);
+
+ if (atomic_read(&key->enabled) != 1) {
+ /*
+ * not simply enabled; is disabled, inc-enabled, or
+ * in transition. don't act. warn if inc-enabled.
+ */
+ WARN_ON_ONCE(atomic_read(&key->enabled) != 0);
+ return;
+ }
+
+ scoped_guard(jump_label_lock) {
+ /*
+ * update if simply enabled. dont act if in transition.
+ */
+ if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
+ jump_label_update_queued(key);
+ }
+}
+
static bool static_key_dec_not_one(struct static_key *key)
{
int v;
-
/*
* Go into the slow path if key::enabled is less than or equal than
* one. One is valid to shut down the key, anything less than one
@@ -488,28 +536,16 @@ static bool jump_label_can_update(struct jump_entry *entry, bool init)
return true;
}
-#ifndef HAVE_JUMP_LABEL_BATCH
static void __jump_label_update(struct static_key *key,
struct jump_entry *entry,
struct jump_entry *stop,
- bool init)
+ bool init, bool apply)
{
for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
- if (jump_label_can_update(entry, init))
- arch_jump_label_transform(entry, jump_label_type(entry));
- }
-}
-#else
-static void __jump_label_update(struct static_key *key,
- struct jump_entry *entry,
- struct jump_entry *stop,
- bool init)
-{
- for (; (entry < stop) && (jump_entry_key(entry) == key); entry++) {
-
if (!jump_label_can_update(entry, init))
continue;
+#ifdef HAVE_JUMP_LABEL_BATCH
if (!arch_jump_label_transform_queue(entry, jump_label_type(entry))) {
/*
* Queue is full: Apply the current queue and try again.
@@ -517,10 +553,24 @@ static void __jump_label_update(struct static_key *key,
arch_jump_label_transform_apply();
BUG_ON(!arch_jump_label_transform_queue(entry, jump_label_type(entry)));
}
+#else
+ arch_jump_label_transform(entry, jump_label_type(entry));
+#endif
}
- arch_jump_label_transform_apply();
+#ifdef HAVE_JUMP_LABEL_BATCH
+ if (apply)
+ arch_jump_label_transform_apply();
+#endif
}
+
+void static_key_apply_queued(void)
+{
+#ifdef HAVE_JUMP_LABEL_BATCH
+ jump_label_lock();
+ arch_jump_label_transform_apply();
+ jump_label_unlock();
#endif
+}
void __init jump_label_init(void)
{
@@ -671,7 +721,7 @@ static int __jump_label_mod_text_reserved(void *start, void *end)
return ret;
}
-static void __jump_label_mod_update(struct static_key *key)
+static void __jump_label_mod_update(struct static_key *key, bool apply)
{
struct static_key_mod *mod;
@@ -692,7 +742,7 @@ static void __jump_label_mod_update(struct static_key *key)
else
stop = m->jump_entries + m->num_jump_entries;
__jump_label_update(key, mod->entries, stop,
- m && m->state == MODULE_STATE_COMING);
+ m && m->state == MODULE_STATE_COMING, apply);
}
}
@@ -762,7 +812,7 @@ static int jump_label_add_module(struct module *mod)
/* Only update if we've changed from our initial state */
do_poke:
if (jump_label_type(iter) != jump_label_init_type(iter))
- __jump_label_update(key, iter, iter_stop, true);
+ __jump_label_update(key, iter, iter_stop, true, true);
}
return 0;
@@ -892,7 +942,7 @@ int jump_label_text_reserved(void *start, void *end)
return ret;
}
-static void jump_label_update(struct static_key *key)
+static void jump_label_update_key(struct static_key *key, bool apply)
{
struct jump_entry *stop = __stop___jump_table;
bool init = system_state < SYSTEM_RUNNING;
@@ -901,7 +951,7 @@ static void jump_label_update(struct static_key *key)
struct module *mod;
if (static_key_linked(key)) {
- __jump_label_mod_update(key);
+ __jump_label_mod_update(key, apply);
return;
}
@@ -916,7 +966,17 @@ static void jump_label_update(struct static_key *key)
entry = static_key_entries(key);
/* if there are no users, entry can be NULL */
if (entry)
- __jump_label_update(key, entry, stop, init);
+ __jump_label_update(key, entry, stop, init, apply);
+}
+
+static void jump_label_update(struct static_key *key)
+{
+ jump_label_update_key(key, true);
+}
+
+static void jump_label_update_queued(struct static_key *key)
+{
+ jump_label_update_key(key, false);
}
#ifdef CONFIG_STATIC_KEYS_SELFTEST
--
2.53.0