[PATCH 2/3] locking/static_key: factor out the fast path of static_key_slow_dec()

From: Jakub Kicinski
Date: Fri Mar 29 2019 - 20:09:28 EST


static_key_slow_dec() checks if the atomic enable count is larger
than 1, and if so, decrements it there before taking the
jump_label_lock. Move this logic into a helper for reuse in
rate-limited keys.
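
For illustration, here is a rough sketch of how a rate-limited
(deferred) key could reuse the new helper. The function below is an
assumption made for this example only; the actual conversion of the
deferred path is left to a later patch:

static void __static_key_slow_dec_deferred(struct static_key *key,
					   struct delayed_work *work,
					   unsigned long timeout)
{
	STATIC_KEY_CHECK_USE(key);

	/* Fast path: just drop one of the extra references. */
	if (static_key_slow_try_dec(key))
		return;

	/* Last reference: defer the real disable to a worker. */
	schedule_delayed_work(work, timeout);
}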

Signed-off-by: Jakub Kicinski <jakub.kicinski@xxxxxxxxxxxxx>
Reviewed-by: Simon Horman <simon.horman@xxxxxxxxxxxxx>
---
kernel/jump_label.c | 23 +++++++++++++++--------
1 file changed, 15 insertions(+), 8 deletions(-)

diff --git a/kernel/jump_label.c b/kernel/jump_label.c
index 73bbbaddbd9c..02c3d11264dd 100644
--- a/kernel/jump_label.c
+++ b/kernel/jump_label.c
@@ -202,13 +202,13 @@ void static_key_disable(struct static_key *key)
 }
 EXPORT_SYMBOL_GPL(static_key_disable);
 
-static void __static_key_slow_dec_cpuslocked(struct static_key *key,
-					      unsigned long rate_limit,
-					      struct delayed_work *work)
+static bool static_key_slow_try_dec(struct static_key *key)
 {
 	int val;
 
-	lockdep_assert_cpus_held();
+	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
+	if (val == 1)
+		return false;
 
 	/*
 	 * The negative count check is valid even when a negative
@@ -217,11 +217,18 @@ static void __static_key_slow_dec_cpuslocked(struct static_key *key,
 	 * returns is unbalanced, because all other static_key_slow_inc()
 	 * instances block while the update is in progress.
 	 */
-	val = atomic_fetch_add_unless(&key->enabled, -1, 1);
-	if (val != 1) {
-		WARN(val < 0, "jump label: negative count!\n");
+	WARN(val < 0, "jump label: negative count!\n");
+	return true;
+}
+
+static void __static_key_slow_dec_cpuslocked(struct static_key *key,
+					      unsigned long rate_limit,
+					      struct delayed_work *work)
+{
+	lockdep_assert_cpus_held();
+
+	if (static_key_slow_try_dec(key))
 		return;
-	}
 
 	jump_label_lock();
 	if (atomic_dec_and_test(&key->enabled)) {
--
2.21.0