[RFC bpf-next 12/13] bpf: verifier: Make verifier loadable
From: Daniel Xu
Date: Tue Apr 08 2025 - 23:37:42 EST
This commit makes the BPF verifier loadable, with the default being the
same as before (built in). Note that no matter the build configuration,
it is always possible to load a new module (evicting the previous).
Signed-off-by: Daniel Xu <dxu@xxxxxxxxx>
---
include/linux/bpf.h | 15 ++++++++++++---
kernel/bpf/Kconfig | 12 ++++++++++++
kernel/bpf/Makefile | 3 ++-
kernel/bpf/core.c | 4 ++++
kernel/bpf/syscall.c | 45 ++++++++++++++++++++++++++++++++++++++++++-
kernel/bpf/verifier.c | 28 +++++++++++++++++++++++++--
6 files changed, 100 insertions(+), 7 deletions(-)
diff --git a/include/linux/bpf.h b/include/linux/bpf.h
index a5806a7b31d3..127b75ecc532 100644
--- a/include/linux/bpf.h
+++ b/include/linux/bpf.h
@@ -72,6 +72,18 @@ typedef int (*bpf_iter_init_seq_priv_t)(void *private_data,
typedef void (*bpf_iter_fini_seq_priv_t)(void *private_data);
typedef unsigned int (*bpf_func_t)(const void *,
const struct bpf_insn *);
+
+struct bpf_check_hook {
+ struct module *owner;
+ /* verify correctness of eBPF program */
+ int (*bpf_check)(struct bpf_prog **prog,
+ union bpf_attr *attr,
+ bpfptr_t uattr,
+ __u32 uattr_size);
+};
+
+extern const struct bpf_check_hook __rcu *bpf_check;
+
struct bpf_iter_seq_info {
const struct seq_operations *seq_ops;
bpf_iter_init_seq_priv_t init_seq_private;
@@ -2663,9 +2675,6 @@ int bpf_get_file_flag(int flags);
int bpf_check_uarg_tail_zero(bpfptr_t uaddr, size_t expected_size,
size_t actual_size);
-/* verify correctness of eBPF program */
-int bpf_check(struct bpf_prog **fp, union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size);
-
#ifndef CONFIG_BPF_JIT_ALWAYS_ON
void bpf_patch_call_args(struct bpf_insn *insn, u32 stack_depth);
#endif
diff --git a/kernel/bpf/Kconfig b/kernel/bpf/Kconfig
index 17067dcb4386..90745b6e2af1 100644
--- a/kernel/bpf/Kconfig
+++ b/kernel/bpf/Kconfig
@@ -39,6 +39,18 @@ config BPF_SYSCALL
Enable the bpf() system call that allows to manipulate BPF programs
and maps via file descriptors.
+config BPF_VERIFIER
+ tristate "BPF verifier"
+ default y
+ depends on BPF_SYSCALL
+ help
+ Controls whether the BPF verifier is built in or as a kernel module.
+
+ Regardless of the choice, it is always possible to dynamically load a
+ new verifier module at runtime (evicting the previous one).
+
+ If unsure, say Y.
+
config BPF_JIT
bool "Enable BPF Just In Time compiler"
depends on BPF
diff --git a/kernel/bpf/Makefile b/kernel/bpf/Makefile
index 70502f038b92..82cf9ea39225 100644
--- a/kernel/bpf/Makefile
+++ b/kernel/bpf/Makefile
@@ -6,11 +6,12 @@ cflags-nogcse-$(CONFIG_X86)$(CONFIG_CC_IS_GCC) := -fno-gcse
endif
CFLAGS_core.o += -Wno-override-init $(cflags-nogcse-yy)
-obj-$(CONFIG_BPF_SYSCALL) += syscall.o verifier.o inode.o helpers.o tnum.o log.o token.o
+obj-$(CONFIG_BPF_SYSCALL) += syscall.o inode.o helpers.o tnum.o log.o token.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_iter.o map_iter.o task_iter.o prog_iter.o link_iter.o
obj-$(CONFIG_BPF_SYSCALL) += hashtab.o arraymap.o percpu_freelist.o bpf_lru_list.o lpm_trie.o map_in_map.o bloom_filter.o
obj-$(CONFIG_BPF_SYSCALL) += local_storage.o queue_stack_maps.o ringbuf.o
obj-$(CONFIG_BPF_SYSCALL) += bpf_local_storage.o bpf_task_storage.o
+obj-$(CONFIG_BPF_VERIFIER) += verifier.o
obj-${CONFIG_BPF_LSM} += bpf_inode_storage.o
obj-$(CONFIG_BPF_SYSCALL) += disasm.o mprog.o
obj-$(CONFIG_BPF_JIT) += trampoline.o
diff --git a/kernel/bpf/core.c b/kernel/bpf/core.c
index 6c8bb4cdac0f..25eac0e2f929 100644
--- a/kernel/bpf/core.c
+++ b/kernel/bpf/core.c
@@ -30,6 +30,7 @@
#include <linux/kallsyms.h>
#include <linux/rcupdate.h>
#include <linux/perf_event.h>
+#include <linux/export.h>
#include <linux/extable.h>
#include <linux/log2.h>
#include <linux/bpf_verifier.h>
@@ -44,6 +45,9 @@
#include <asm/barrier.h>
#include <linux/unaligned.h>
+const struct bpf_check_hook __rcu *bpf_check __read_mostly;
+EXPORT_SYMBOL_GPL(bpf_check);
+
/* Registers */
#define BPF_R0 regs[BPF_REG_0]
#define BPF_R1 regs[BPF_REG_1]
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 2ef55503ba32..7cf65d2c37ee 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -2759,6 +2759,41 @@ static bool is_perfmon_prog_type(enum bpf_prog_type prog_type)
}
}
+static const struct bpf_check_hook *bpf_check_get(void)
+{
+ const struct bpf_check_hook *hook;
+ int err;
+
+ /* RCU protects us from races against module unloading */
+ rcu_read_lock();
+ hook = rcu_dereference(bpf_check);
+ if (!hook) {
+ rcu_read_unlock();
+ err = request_module("verifier");
+ if (err)
+ return ERR_PTR(err < 0 ? err : -ENOENT);
+
+ rcu_read_lock();
+ hook = rcu_dereference(bpf_check);
+ }
+
+ if (hook && try_module_get(hook->owner)) {
+ /* Once we have a refcnt on the module, we no longer need RCU */
+ hook = rcu_pointer_handoff(hook);
+ } else {
+ WARN_ONCE(!hook, "verifier has bad registration");
+ hook = ERR_PTR(-ENOENT);
+ }
+ rcu_read_unlock();
+
+ return hook;
+}
+
+static void bpf_check_put(const struct bpf_check_hook *c)
+{
+ module_put(c->owner);
+}
+
/* last field in 'union bpf_attr' used by this command */
#define BPF_PROG_LOAD_LAST_FIELD fd_array_cnt
@@ -2766,6 +2801,7 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
{
enum bpf_prog_type type = attr->prog_type;
struct bpf_prog *prog, *dst_prog = NULL;
+ const struct bpf_check_hook *hook;
struct btf *attach_btf = NULL;
struct bpf_token *token = NULL;
bool bpf_cap;
@@ -2973,8 +3009,15 @@ static int bpf_prog_load(union bpf_attr *attr, bpfptr_t uattr, u32 uattr_size)
if (err)
goto free_prog_sec;
+ hook = bpf_check_get();
+ if (IS_ERR(hook)) {
+ err = PTR_ERR(hook);
+ goto free_used_maps;
+ }
+
/* run eBPF verifier */
- err = bpf_check(&prog, attr, uattr, uattr_size);
+ err = hook->bpf_check(&prog, attr, uattr, uattr_size);
+ bpf_check_put(hook);
if (err < 0)
goto free_used_maps;
diff --git a/kernel/bpf/verifier.c b/kernel/bpf/verifier.c
index 080cc380e806..1574400a0c76 100644
--- a/kernel/bpf/verifier.c
+++ b/kernel/bpf/verifier.c
@@ -18351,7 +18351,6 @@ static __init int unbound_reg_init(void)
unbound_reg.live |= REG_LIVE_READ;
return 0;
}
-late_initcall(unbound_reg_init);
static bool is_stack_all_misc(struct bpf_verifier_env *env,
struct bpf_stack_state *stack)
@@ -23428,7 +23427,7 @@ static int compute_live_registers(struct bpf_verifier_env *env)
return err;
}
-int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
+static int __bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u32 uattr_size)
{
u64 start_time = ktime_get_ns();
struct bpf_verifier_env *env;
@@ -23695,3 +23694,29 @@ int bpf_check(struct bpf_prog **prog, union bpf_attr *attr, bpfptr_t uattr, __u3
kvfree(env);
return ret;
}
+
+static const struct bpf_check_hook verifier = {
+ .owner = THIS_MODULE,
+ .bpf_check = __bpf_check,
+};
+
+static int __init bpf_verifier_init(void)
+{
+ unbound_reg_init();
+ rcu_assign_pointer(bpf_check, &verifier);
+
+ return 0;
+}
+
+static void __exit bpf_verifier_fini(void)
+{
+ rcu_assign_pointer(bpf_check, NULL);
+ /* Wait for in-flight bpf_check_get() readers before module text is freed */
+ synchronize_rcu();
+}
+
+module_init(bpf_verifier_init);
+module_exit(bpf_verifier_fini);
+
+MODULE_LICENSE("GPL");
+MODULE_DESCRIPTION("eBPF verifier");
--
2.47.1