[PATCH rfc 3/6] bpf: sched: introduce bpf_sched_enabled()

From: Roman Gushchin
Date: Thu Sep 16 2021 - 12:34:07 EST


Introduce a dedicated static key and the bpf_sched_enabled() wrapper
to guard all invocations of bpf programs in the scheduler code.

This avoids any potential performance regression in the case when no
scheduler bpf programs are attached: while the static key is disabled,
each guarded call site costs only a patched-out branch.
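For example, a call site in the scheduler code is expected to look
roughly like this (illustrative sketch only; the hook name is
hypothetical here, as the actual hooks are wired up by later patches
in the series):

	/* Invoke the hook only if a sched bpf program is attached. */
	if (bpf_sched_enabled())
		bpf_sched_cfs_check_preempt_tick(curr, delta_exec);

With no BPF_PROG_TYPE_SCHED program attached, static_branch_unlikely()
keeps the branch disabled, so the hook adds no overhead beyond a nop
in the hot path.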

Signed-off-by: Roman Gushchin <guro@xxxxxx>
---
 include/linux/bpf_sched.h | 24 ++++++++++++++++++++++++
 kernel/bpf/syscall.c      |  7 +++++++
 kernel/sched/bpf_sched.c  |  2 ++
 3 files changed, 33 insertions(+)

diff --git a/include/linux/bpf_sched.h b/include/linux/bpf_sched.h
index 0f8d3dae53df..6e773aecdff7 100644
--- a/include/linux/bpf_sched.h
+++ b/include/linux/bpf_sched.h
@@ -6,6 +6,8 @@
 
 #ifdef CONFIG_BPF_SYSCALL
 
+#include <linux/jump_label.h>
+
 #define BPF_SCHED_HOOK(RET, DEFAULT, NAME, ...)	\
 	RET bpf_sched_##NAME(__VA_ARGS__);
 #include <linux/sched_hook_defs.h>
@@ -14,6 +16,23 @@
 int bpf_sched_verify_prog(struct bpf_verifier_log *vlog,
			  const struct bpf_prog *prog);
 
+DECLARE_STATIC_KEY_FALSE(bpf_sched_enabled_key);
+
+static inline bool bpf_sched_enabled(void)
+{
+	return static_branch_unlikely(&bpf_sched_enabled_key);
+}
+
+static inline void bpf_sched_inc(void)
+{
+	static_branch_inc(&bpf_sched_enabled_key);
+}
+
+static inline void bpf_sched_dec(void)
+{
+	static_branch_dec(&bpf_sched_enabled_key);
+}
+
 #else /* CONFIG_BPF_SYSCALL */
 
 #define BPF_SCHED_HOOK(RET, DEFAULT, NAME, ...)	\
@@ -23,6 +42,11 @@ static inline RET bpf_sched_##NAME(__VA_ARGS__)	\
 }
 #undef BPF_SCHED_HOOK
 
+static inline bool bpf_sched_enabled(void)
+{
+	return false;
+}
+
 #endif /* CONFIG_BPF_SYSCALL */
 
 #endif /* _BPF_CGROUP_H */
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 67e062376f22..aa5565110498 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -31,6 +31,7 @@
 #include <linux/bpf-netns.h>
 #include <linux/rcupdate_trace.h>
 #include <linux/memcontrol.h>
+#include <linux/bpf_sched.h>
 
 #define IS_FD_ARRAY(map) ((map)->map_type == BPF_MAP_TYPE_PERF_EVENT_ARRAY || \
			  (map)->map_type == BPF_MAP_TYPE_CGROUP_ARRAY || \
@@ -2602,6 +2603,9 @@ static void bpf_tracing_link_release(struct bpf_link *link)
 	struct bpf_tracing_link *tr_link =
 		container_of(link, struct bpf_tracing_link, link);
 
+	if (link->prog->type == BPF_PROG_TYPE_SCHED)
+		bpf_sched_dec();
+
 	WARN_ON_ONCE(bpf_trampoline_unlink_prog(link->prog,
 						tr_link->trampoline));
 
@@ -2804,6 +2808,9 @@ static int bpf_tracing_prog_attach(struct bpf_prog *prog,
 		goto out_unlock;
 	}
 
+	if (prog->type == BPF_PROG_TYPE_SCHED)
+		bpf_sched_inc();
+
 	link->tgt_prog = tgt_prog;
 	link->trampoline = tr;
 
diff --git a/kernel/sched/bpf_sched.c b/kernel/sched/bpf_sched.c
index ead691dc6e85..bf92cfb5ecf4 100644
--- a/kernel/sched/bpf_sched.c
+++ b/kernel/sched/bpf_sched.c
@@ -6,6 +6,8 @@
 #include <linux/btf_ids.h>
 #include "sched.h"
 
+DEFINE_STATIC_KEY_FALSE(bpf_sched_enabled_key);
+
 /*
  * For every hook declare a nop function where a BPF program can be attached.
  */
--
2.31.1