[PATCH bpf-next 2/2] bpf: fix stackmap overflow check in __bpf_get_stackid()
From: Arnaud Lecomte
Date: Sat Oct 25 2025 - 10:15:35 EST
Syzkaller reported a KASAN slab-out-of-bounds write in __bpf_get_stackid()
when copying stack trace data. The overflow happens when the perf trace
contains more stack entries than the stack map bucket can hold, so the
copy runs past the end of the bucket's data array. Fix this by clamping
the number of copied entries to the map's maximum depth, as computed by
stack_map_calculate_max_depth().
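A condensed sketch of the failure mode (control flow elided for
illustration; the memcpy() into the fixed-size per-bucket data array is
the overflowing copy):

	trace_nr = trace->nr - skip;	/* bounded only by the trace depth */
	trace_len = trace_nr * sizeof(u64);
	...
	memcpy(new_bucket->data, ips, trace_len);	/* trace_len can exceed
							 * the map value size and
							 * run past the bucket */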
Reported-by: syzbot+c9b724fbb41cf2538b7b@xxxxxxxxxxxxxxxxxxxxxxxxx
Closes: https://syzkaller.appspot.com/bug?extid=c9b724fbb41cf2538b7b
Fixes: ee2a098851bf ("bpf: Adjust BPF stack helper functions to accommodate skip > 0")
Acked-by: Yonghong Song <yonghong.song@xxxxxxxxx>
Acked-by: Song Liu <song@xxxxxxxxxx>
Signed-off-by: Arnaud Lecomte <contact@xxxxxxxxxxxxxx>
---
Changes in v2:
- Fixed max_depth naming across the get_stackid helpers
Changes in v4:
- Removed unnecessary empty line in __bpf_get_stackid
Changes in v6:
- Added back trace_len computation in __bpf_get_stackid
Changes in v7:
- Removed a useless trace->nr assignment in bpf_get_stackid_pe
- Added restoration of trace->nr for both kernel and user traces
in bpf_get_stackid_pe
Changes in v9:
- Fixed variable declarations in bpf_get_stackid_pe
- Added the missing truncation of trace_nr in __bpf_get_stackid
Changes in v10:
- Removed the unneeded trace->nr = nr_kernel; assignment in bpf_get_stackid_pe
Link to v9:
https://lore.kernel.org/all/20250912233558.75076-1-contact@xxxxxxxxxxxxxx/
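Reviewer note (kept below the "---" marker, so git am drops it): a toy
user-space model of the clamp arithmetic. It assumes the
stack_map_calculate_max_depth() contract from patch 1/2, i.e. that the
helper returns the bucket capacity plus the skipped prefix, capped at
sysctl_perf_event_max_stack; all names and values below are made up for
illustration.

	/* clamp_demo.c - build with: gcc -Wall -o clamp_demo clamp_demo.c */
	#include <stdio.h>

	#define MIN(a, b)	((a) < (b) ? (a) : (b))

	int main(void)
	{
		unsigned int value_size = 64;	/* bucket holds 8 u64 entries */
		unsigned int elem_size = 8;	/* sizeof(u64) */
		unsigned int skip = 2;		/* BPF_F_SKIP_FIELD_MASK bits */
		unsigned int trace_nr_in = 127;	/* deep perf callchain */
		/* assumed helper result: capacity plus skipped prefix */
		unsigned int max_depth = value_size / elem_size + skip;
		/* the fix: never copy more than the bucket can hold */
		unsigned int trace_nr = MIN(trace_nr_in - skip, max_depth - skip);

		/* without the clamp, 125 * 8 bytes would be copied into
		 * a 64-byte bucket
		 */
		printf("entries copied: %u, bucket capacity: %u\n",
		       trace_nr, value_size / elem_size);
		return 0;
	}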
---
kernel/bpf/stackmap.c | 15 ++++++++-------
1 file changed, 8 insertions(+), 7 deletions(-)
diff --git a/kernel/bpf/stackmap.c b/kernel/bpf/stackmap.c
index 9a86b5acac10..c0ee51db8eed 100644
--- a/kernel/bpf/stackmap.c
+++ b/kernel/bpf/stackmap.c
@@ -251,8 +251,8 @@ static long __bpf_get_stackid(struct bpf_map *map,
{
struct bpf_stack_map *smap = container_of(map, struct bpf_stack_map, map);
struct stack_map_bucket *bucket, *new_bucket, *old_bucket;
+ u32 hash, id, trace_nr, trace_len, i, max_depth;
u32 skip = flags & BPF_F_SKIP_FIELD_MASK;
- u32 hash, id, trace_nr, trace_len, i;
bool user = flags & BPF_F_USER_STACK;
u64 *ips;
bool hash_matches;
@@ -261,7 +261,8 @@ static long __bpf_get_stackid(struct bpf_map *map,
/* skipping more than usable stack trace */
		return -EFAULT;

-	trace_nr = trace->nr - skip;
+ max_depth = stack_map_calculate_max_depth(map->value_size, stack_map_data_size(map), flags);
+ trace_nr = min_t(u32, trace->nr - skip, max_depth - skip);
trace_len = trace_nr * sizeof(u64);
ips = trace->ip + skip;
hash = jhash2((u32 *)ips, trace_len / sizeof(u32), 0);
@@ -390,15 +391,11 @@ BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
		return -EFAULT;

	nr_kernel = count_kernel_ip(trace);

+ __u64 nr = trace->nr; /* save original */
if (kernel) {
- __u64 nr = trace->nr;
-
trace->nr = nr_kernel;
ret = __bpf_get_stackid(map, trace, flags);
-
- /* restore nr */
- trace->nr = nr;
} else { /* user */
		u64 skip = flags & BPF_F_SKIP_FIELD_MASK;

@@ -409,6 +406,10 @@ BPF_CALL_3(bpf_get_stackid_pe, struct bpf_perf_event_data_kern *, ctx,
flags = (flags & ~BPF_F_SKIP_FIELD_MASK) | skip;
ret = __bpf_get_stackid(map, trace, flags);
}
+
+ /* restore nr */
+ trace->nr = nr;
+
return ret;
}

--
2.47.3