[PATCH bpf-next v10 6/8] bpf: Add syscall common attributes support for map_create

From: Leon Hwang

Date: Wed Feb 11 2026 - 10:17:17 EST


Many BPF_MAP_CREATE validation failures currently return -EINVAL without
any explanation to userspace.

Plumb common syscall log attributes into map_create(), create a verifier
log from bpf_common_attr::log_buf/log_size/log_level, and report
map-creation failure reasons through that buffer.

This improves debuggability by allowing userspace to inspect why map
creation failed and read back log_true_size from common attributes.

Signed-off-by: Leon Hwang <leon.hwang@xxxxxxxxx>
---
include/linux/bpf_verifier.h | 3 ++
kernel/bpf/log.c | 30 +++++++++++++++++
kernel/bpf/syscall.c | 65 ++++++++++++++++++++++++++++++------
3 files changed, 88 insertions(+), 10 deletions(-)

diff --git a/include/linux/bpf_verifier.h b/include/linux/bpf_verifier.h
index 34f28d40022a..52627a93338f 100644
--- a/include/linux/bpf_verifier.h
+++ b/include/linux/bpf_verifier.h
@@ -645,6 +645,9 @@ struct bpf_log_attr {
int bpf_log_attr_init(struct bpf_log_attr *log, u64 log_buf, u32 log_size, u32 log_level,
u32 __user *log_true_size, struct bpf_common_attr *common, bpfptr_t uattr,
u32 size);
+struct bpf_verifier_log *bpf_log_attr_create_vlog(struct bpf_log_attr *attr_log,
+ struct bpf_common_attr *common, bpfptr_t uattr,
+ u32 size);
int bpf_log_attr_finalize(struct bpf_log_attr *attr, struct bpf_verifier_log *log);

#define BPF_MAX_SUBPROGS 256
diff --git a/kernel/bpf/log.c b/kernel/bpf/log.c
index a2b41bf5e9cb..f7251217452e 100644
--- a/kernel/bpf/log.c
+++ b/kernel/bpf/log.c
@@ -904,6 +904,36 @@ int bpf_log_attr_init(struct bpf_log_attr *log, u64 log_buf, u32 log_size, u32 l
return 0;
}

+
+/* Build an optional verifier log from a syscall's common attributes.
+ *
+ * Zeroes @attr_log and, when the caller-supplied common attr struct is
+ * large enough to contain log_true_size, records the user address that
+ * bpf_log_attr_finalize() should write the true log size back to.
+ *
+ * Returns NULL when no log buffer was requested (common->log_buf == 0),
+ * an allocated and initialized struct bpf_verifier_log on success, or an
+ * ERR_PTR() on allocation/initialization failure.  The caller owns the
+ * returned log and must kfree() it after bpf_log_attr_finalize().
+ *
+ * NOTE(review): log_true_size is derived from uattr.user without checking
+ * uattr.is_kernel — presumably common attrs only arrive from user space
+ * here; confirm this holds for bpf_sys_bpf()-originated (kernel bpfptr)
+ * callers, which would need a copy_to_bpfptr_offset()-style write-back.
+ */
+struct bpf_verifier_log *bpf_log_attr_create_vlog(struct bpf_log_attr *attr_log,
+ struct bpf_common_attr *common, bpfptr_t uattr,
+ u32 size)
+{
+ struct bpf_verifier_log *log;
+ int err;
+
+ memset(attr_log, 0, sizeof(*attr_log));
+ /* Only record the write-back location if the caller's struct is new
+  * enough to carry the log_true_size field.
+  */
+ if (size >= offsetofend(struct bpf_common_attr, log_true_size))
+ attr_log->log_true_size = uattr.user + offsetof(struct bpf_common_attr,
+ log_true_size);
+
+ /* No log buffer requested: callers treat a NULL return as "no logging". */
+ if (!common->log_buf)
+ return NULL;
+
+ log = kzalloc_obj(*log, GFP_KERNEL);
+ if (!log)
+ return ERR_PTR(-ENOMEM);
+
+ /* Initialize the vlog over the user buffer; any error (e.g. a bad
+  * level/size combination rejected by bpf_vlog_init) is propagated.
+  */
+ err = bpf_vlog_init(log, common->log_level, u64_to_user_ptr(common->log_buf),
+ common->log_size);
+ if (err) {
+ kfree(log);
+ return ERR_PTR(err);
+ }
+
+ return log;
+}
+
int bpf_log_attr_finalize(struct bpf_log_attr *attr, struct bpf_verifier_log *log)
{
u32 log_true_size;
diff --git a/kernel/bpf/syscall.c b/kernel/bpf/syscall.c
index 6d727a4b5121..04bb4a905559 100644
--- a/kernel/bpf/syscall.c
+++ b/kernel/bpf/syscall.c
@@ -1365,7 +1365,7 @@ static int map_check_btf(struct bpf_map *map, struct bpf_token *token,

#define BPF_MAP_CREATE_LAST_FIELD excl_prog_hash_size
/* called via syscall */
-static int map_create(union bpf_attr *attr, bpfptr_t uattr)
+static int __map_create(union bpf_attr *attr, bpfptr_t uattr, struct bpf_verifier_log *log)
{
const struct bpf_map_ops *ops;
struct bpf_token *token = NULL;
@@ -1377,8 +1377,10 @@ static int map_create(union bpf_attr *attr, bpfptr_t uattr)
int err;

err = CHECK_ATTR(BPF_MAP_CREATE);
- if (err)
+ if (err) {
+ bpf_log(log, "Invalid attr.\n");
return -EINVAL;
+ }

/* check BPF_F_TOKEN_FD flag, remember if it's set, and then clear it
* to avoid per-map type checks tripping on unknown flag
@@ -1387,17 +1389,25 @@ static int map_create(union bpf_attr *attr, bpfptr_t uattr)
attr->map_flags &= ~BPF_F_TOKEN_FD;

if (attr->btf_vmlinux_value_type_id) {
- if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS ||
- attr->btf_key_type_id || attr->btf_value_type_id)
+ if (attr->map_type != BPF_MAP_TYPE_STRUCT_OPS) {
+ bpf_log(log, "btf_vmlinux_value_type_id can only be used with struct_ops maps.\n");
return -EINVAL;
+ }
+ if (attr->btf_key_type_id || attr->btf_value_type_id) {
+ bpf_log(log, "btf_vmlinux_value_type_id is mutually exclusive with btf_key_type_id and btf_value_type_id.\n");
+ return -EINVAL;
+ }
} else if (attr->btf_key_type_id && !attr->btf_value_type_id) {
+ bpf_log(log, "Invalid btf_value_type_id.\n");
return -EINVAL;
}

if (attr->map_type != BPF_MAP_TYPE_BLOOM_FILTER &&
attr->map_type != BPF_MAP_TYPE_ARENA &&
- attr->map_extra != 0)
+ attr->map_extra != 0) {
+ bpf_log(log, "Invalid map_extra.\n");
return -EINVAL;
+ }

f_flags = bpf_get_file_flag(attr->map_flags);
if (f_flags < 0)
@@ -1405,13 +1415,17 @@ static int map_create(union bpf_attr *attr, bpfptr_t uattr)

if (numa_node != NUMA_NO_NODE &&
((unsigned int)numa_node >= nr_node_ids ||
- !node_online(numa_node)))
+ !node_online(numa_node))) {
+ bpf_log(log, "Invalid numa_node.\n");
return -EINVAL;
+ }

/* find map type and init map: hashtable vs rbtree vs bloom vs ... */
map_type = attr->map_type;
- if (map_type >= ARRAY_SIZE(bpf_map_types))
+ if (map_type >= ARRAY_SIZE(bpf_map_types)) {
+ bpf_log(log, "Invalid map_type.\n");
return -EINVAL;
+ }
map_type = array_index_nospec(map_type, ARRAY_SIZE(bpf_map_types));
ops = bpf_map_types[map_type];
if (!ops)
@@ -1429,8 +1443,10 @@ static int map_create(union bpf_attr *attr, bpfptr_t uattr)

if (token_flag) {
token = bpf_token_get_from_fd(attr->map_token_fd);
- if (IS_ERR(token))
+ if (IS_ERR(token)) {
+ bpf_log(log, "Invalid map_token_fd.\n");
return PTR_ERR(token);
+ }

/* if current token doesn't grant map creation permissions,
* then we can't use this token, so ignore it and rely on
@@ -1513,8 +1529,10 @@ static int map_create(union bpf_attr *attr, bpfptr_t uattr)

err = bpf_obj_name_cpy(map->name, attr->map_name,
sizeof(attr->map_name));
- if (err < 0)
+ if (err < 0) {
+ bpf_log(log, "Invalid map_name.\n");
goto free_map;
+ }

preempt_disable();
map->cookie = gen_cookie_next(&bpf_map_cookie);
@@ -1537,6 +1555,7 @@ static int map_create(union bpf_attr *attr, bpfptr_t uattr)

btf = btf_get_by_fd(attr->btf_fd);
if (IS_ERR(btf)) {
+ bpf_log(log, "Invalid btf_fd.\n");
err = PTR_ERR(btf);
goto free_map;
}
@@ -1564,6 +1583,7 @@ static int map_create(union bpf_attr *attr, bpfptr_t uattr)
bpfptr_t uprog_hash = make_bpfptr(attr->excl_prog_hash, uattr.is_kernel);

if (attr->excl_prog_hash_size != SHA256_DIGEST_SIZE) {
+ bpf_log(log, "Invalid excl_prog_hash_size.\n");
err = -EINVAL;
goto free_map;
}
@@ -1579,6 +1599,7 @@ static int map_create(union bpf_attr *attr, bpfptr_t uattr)
goto free_map;
}
} else if (attr->excl_prog_hash_size) {
+ bpf_log(log, "Invalid excl_prog_hash_size.\n");
err = -EINVAL;
goto free_map;
}
@@ -1617,6 +1638,30 @@ static int map_create(union bpf_attr *attr, bpfptr_t uattr)
return err;
}

+/* Wrapper around __map_create() that wires up the common-attr verifier
+ * log: creates the log from the common attributes, runs map creation
+ * with it, then finalizes the log (null-terminating the user buffer and
+ * writing back log_true_size).
+ *
+ * Returns the new map fd (>= 0) on success or a negative errno.
+ */
+static int map_create(union bpf_attr *attr, bpfptr_t uattr, struct bpf_common_attr *attr_common,
+ bpfptr_t uattr_common, u32 size_common)
+{
+ struct bpf_verifier_log *log;
+ struct bpf_log_attr attr_log;
+ int err, ret;
+
+ log = bpf_log_attr_create_vlog(&attr_log, attr_common, uattr_common, size_common);
+ if (IS_ERR(log))
+ return PTR_ERR(log);
+
+ err = __map_create(attr, uattr, log);
+
+ /* Finalize unconditionally so log_true_size is reported back even on
+  * success.  A finalize error must not clobber a successfully created
+  * map fd — the fd is already installed and cannot be uninstalled from
+  * here — so only surface it when map creation already failed.
+  */
+ ret = bpf_log_attr_finalize(&attr_log, log);
+ if (err < 0 && ret)
+ err = ret;
+
+ kfree(log);
+ return err;
+}
+
void bpf_map_inc(struct bpf_map *map)
{
atomic64_inc(&map->refcnt);
@@ -6226,7 +6271,7 @@ static int __sys_bpf(enum bpf_cmd cmd, bpfptr_t uattr, unsigned int size,

switch (cmd) {
case BPF_MAP_CREATE:
- err = map_create(&attr, uattr);
+ err = map_create(&attr, uattr, &attr_common, uattr_common, size_common);
break;
case BPF_MAP_LOOKUP_ELEM:
err = map_lookup_elem(&attr);
--
2.52.0