[tip: locking/urgent] jump_label: Fix static_key_slow_dec() yet again

From: tip-bot2 for Peter Zijlstra
Date: Tue Sep 10 2024 - 06:12:23 EST


The following commit has been merged into the locking/urgent branch of tip:

Commit-ID: 1d7f856c2ca449f04a22d876e36b464b7a9d28b6
Gitweb: https://git.kernel.org/tip/1d7f856c2ca449f04a22d876e36b464b7a9d28b6
Author: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
AuthorDate: Mon, 09 Sep 2024 12:50:09 +02:00
Committer: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
CommitterDate: Tue, 10 Sep 2024 11:57:27 +02:00

jump_label: Fix static_key_slow_dec() yet again

While commit 83ab38ef0a0b ("jump_label: Fix concurrency issues in
static_key_slow_dec()") fixed one problem, it created yet another;
notably, the following interleaving is now possible:

  slow_dec
    if (try_dec) // dec_not_one-ish, false
    // enabled == 1
                                        slow_inc
                                          if (inc_not_disabled) // inc_not_zero-ish
                                          // enabled == 2
                                          return

    guard(mutex)(&jump_label_mutex);
    if (atomic_cmpxchg(1,0)==1) // false, we're 2

                                        slow_dec
                                          if (try_dec) // dec_not_one, true
                                          // enabled == 1
                                          return

    else
      try_dec() // dec_not_one, false
      WARN
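
For reference, a sketch of the pre-fix slow path (reconstructed from the
'-' lines of the diff below), annotated with where the interleaving above
bites; the comments are not in the original source:

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();

	/* enabled == 1 here, so this dec_not_one style fast path fails ... */
	if (static_key_slow_try_dec(key))
		return;

	/* ... a concurrent slow_inc can now bump enabled to 2 ... */
	guard(mutex)(&jump_label_mutex);

	/* ... so the cmpxchg(1, 0) fails even though nothing is wrong ... */
	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
		jump_label_update(key);
	else
		/*
		 * ... and if the concurrent slow_dec has meanwhile brought
		 * enabled back down to 1, this retry refuses to decrement
		 * (it sees v <= 1) and the WARN fires spuriously.
		 */
		WARN_ON_ONCE(!static_key_slow_try_dec(key));
}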

Use dec_and_test instead of cmpxchg(), like it was prior to
83ab38ef0a0b. Add a few WARNs for the paranoid.
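
Under the same interleaving, the reworked slow path (the '+' lines in the
__static_key_slow_dec_cpuslocked() hunk below) resolves cleanly; a sketch
with the same style of annotations:

static void __static_key_slow_dec_cpuslocked(struct static_key *key)
{
	lockdep_assert_cpus_held();
	int val;

	/* dec_not_one() saw enabled == 1 and declined to decrement ... */
	if (static_key_dec_not_one(key))
		return;

	/* ... a concurrent slow_inc can still bump enabled to 2 before this */
	guard(mutex)(&jump_label_mutex);
	val = atomic_read(&key->enabled);
	/* -1 cannot be observed with jump_label_mutex held, see slow_inc */
	if (WARN_ON_ONCE(val == -1))
		return;
	/* cannot already be 0, something went sideways */
	if (WARN_ON_ONCE(val == 0))
		return;

	/*
	 * Whether a concurrent inc/dec pair left enabled at 1 or pushed it
	 * to 2, dec_and_test only patches the text when this really was the
	 * last reference; no spurious WARN either way.
	 */
	if (atomic_dec_and_test(&key->enabled))
		jump_label_update(key);
}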

Fixes: 83ab38ef0a0b ("jump_label: Fix concurrency issues in static_key_slow_dec()")
Reported-by: "Darrick J. Wong" <djwong@xxxxxxxxxx>
Tested-by: Klara Modin <klarasmodin@xxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
---
kernel/jump_label.c | 34 +++++++++++++++++++++++++++-------
1 file changed, 27 insertions(+), 7 deletions(-)

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 6dc76b5..93a822d 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -168,7 +168,7 @@ bool static_key_slow_inc_cpuslocked(struct static_key *key)
 		jump_label_update(key);
 		/*
 		 * Ensure that when static_key_fast_inc_not_disabled() or
-		 * static_key_slow_try_dec() observe the positive value,
+		 * static_key_dec_not_one() observe the positive value,
 		 * they must also observe all the text changes.
 		 */
 		atomic_set_release(&key->enabled, 1);
@@ -250,7 +250,7 @@ void static_key_disable(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-static bool static_key_slow_try_dec(struct static_key *key)
+static bool static_key_dec_not_one(struct static_key *key)
 {
 	int v;
 
@@ -274,6 +274,14 @@ static bool static_key_slow_try_dec(struct static_key *key)
 		 * enabled. This suggests an ordering problem on the user side.
 		 */
 		WARN_ON_ONCE(v < 0);
+
+		/*
+		 * Warn about underflow, and lie about success in an attempt to
+		 * not make things worse.
+		 */
+		if (WARN_ON_ONCE(v == 0))
+			return true;
+
 		if (v <= 1)
 			return false;
 	} while (!likely(atomic_try_cmpxchg(&key->enabled, &v, v - 1)));
@@ -284,15 +292,27 @@ static bool static_key_slow_try_dec(struct static_key *key)
 static void __static_key_slow_dec_cpuslocked(struct static_key *key)
 {
 	lockdep_assert_cpus_held();
+	int val;
 
-	if (static_key_slow_try_dec(key))
+	if (static_key_dec_not_one(key))
 		return;
 
 	guard(mutex)(&jump_label_mutex);
-	if (atomic_cmpxchg(&key->enabled, 1, 0) == 1)
+	val = atomic_read(&key->enabled);
+	/*
+	 * It should be impossible to observe -1 with jump_label_mutex held,
+	 * see static_key_slow_inc_cpuslocked().
+	 */
+	if (WARN_ON_ONCE(val == -1))
+		return;
+	/*
+	 * Cannot already be 0, something went sideways.
+	 */
+	if (WARN_ON_ONCE(val == 0))
+		return;
+
+	if (atomic_dec_and_test(&key->enabled))
 		jump_label_update(key);
-	else
-		WARN_ON_ONCE(!static_key_slow_try_dec(key));
 }
 
 static void __static_key_slow_dec(struct static_key *key)
@@ -329,7 +349,7 @@ void __static_key_slow_dec_deferred(struct static_key *key,
 {
 	STATIC_KEY_CHECK_USE(key);
 
-	if (static_key_slow_try_dec(key))
+	if (static_key_dec_not_one(key))
 		return;
 
 	schedule_delayed_work(work, timeout);
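
For context, the imbalance these WARNs report is an extra decrement without
a matching increment. A minimal, hypothetical usage sketch of the refcounted
API that ends up in these slow paths (the key and functions below are made
up for illustration, not part of this patch):

#include <linux/jump_label.h>

/* hypothetical example key */
static DEFINE_STATIC_KEY_FALSE(example_feature_key);

void example_feature_register(void)
{
	/* takes a reference; ends up in static_key_slow_inc() */
	static_branch_inc(&example_feature_key);
}

void example_feature_unregister(void)
{
	/*
	 * Must pair 1:1 with the inc above; dropping a reference that was
	 * never taken is what the new WARN_ON_ONCE(v == 0) catches.
	 */
	static_branch_dec(&example_feature_key);
}

bool example_feature_active(void)
{
	/* the branch here is what jump_label_update() patches */
	return static_branch_unlikely(&example_feature_key);
}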