This patch adds the flag sample_disable to control the trace data
output process during perf sampling. By setting this flag and
integrating with eBPF, we can control the data output process and
collect only the samples we are most interested in.
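
For illustration, user space would request the new behaviour when
opening the event. The sketch below is not part of this patch: the raw
perf_event_open() syscall wrapper and the event choice are illustrative,
and attr.sample_disable is assumed to be the uapi bit added elsewhere
in this series.

#include <linux/perf_event.h>
#include <string.h>
#include <sys/syscall.h>
#include <unistd.h>

/* Sketch: open a cycles sampling event on @cpu with sample output
 * suppressed from the start, assuming headers built from this series.
 */
static int open_sampling_event(int cpu)
{
	struct perf_event_attr attr;

	memset(&attr, 0, sizeof(attr));
	attr.size = sizeof(attr);
	attr.type = PERF_TYPE_HARDWARE;
	attr.config = PERF_COUNT_HW_CPU_CYCLES;
	attr.sample_period = 100000;
	attr.sample_disable = 1;	/* start with sample output disabled */

	return syscall(__NR_perf_event_open, &attr, -1 /* pid */, cpu, -1, 0);
}

The resulting fd can then be placed into a BPF_MAP_TYPE_PERF_EVENT_ARRAY
map so that a bpf program can reach the event.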
The BPF helper bpf_perf_event_sample_control() can control the
perf_event on the current CPU.
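
A bpf program could then suppress or re-enable output from its attach
point. Again a sketch only (samples/bpf style): the map layout, section
names, the helper wrapper and the BPF_FUNC_perf_event_sample_control id
are assumptions for illustration and are not shown in this patch.

#include <linux/ptrace.h>
#include <uapi/linux/bpf.h>
#include "bpf_helpers.h"

/* Helper wrapper; the helper id is assumed to be added by this series. */
static int (*bpf_perf_event_sample_control)(void *map, int index, int flag) =
	(void *) BPF_FUNC_perf_event_sample_control;

struct bpf_map_def SEC("maps") sample_events = {
	.type		= BPF_MAP_TYPE_PERF_EVENT_ARRAY,
	.key_size	= sizeof(int),
	.value_size	= sizeof(u32),
	.max_entries	= 32,
};

SEC("kprobe/sys_write")
int toggle_sampling(struct pt_regs *ctx)
{
	/* Adjust the sample_disable count of the event stored at index 0;
	 * with the helper as implemented below, a zero flag increments the
	 * count so that samples are written out again.
	 */
	bpf_perf_event_sample_control(&sample_events, 0, 0);
	return 0;
}

char _license[] SEC("license") = "GPL";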
Signed-off-by: Kaixu Xia <xiakaixu@xxxxxxxxxx>
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -6337,6 +6337,9 @@ static int __perf_event_overflow(struct perf_event *event,
irq_work_queue(&event->pending);
}
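+	/* Skip writing the sample while the event's sample_disable count is zero. */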
+ if (!atomic_read(&event->sample_disable))
+ return ret;
+
if (event->overflow_handler)
event->overflow_handler(event, data, regs);
else
@@ -7709,6 +7712,14 @@ static void account_event(struct perf_event *event)
account_event_cpu(event, event->cpu);
}
+static void perf_event_check_sample_flag(struct perf_event *event)
+{
+ if (event->attr.sample_disable == 1)
+ atomic_set(&event->sample_disable, 0);
+ else
+ atomic_set(&event->sample_disable, 1);
+}
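+
+/*
+ * bpf helper: adjust the sample_disable count of the perf_event stored at
+ * @index in a perf event array map. A non-zero @flag decrements the count,
+ * a zero @flag increments it; samples are only written out while the count
+ * is non-zero.
+ */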
+static u64 bpf_perf_event_sample_control(u64 r1, u64 index, u64 flag, u64 r4, u64 r5)
+{
+ struct bpf_map *map = (struct bpf_map *) (unsigned long) r1;
+ struct bpf_array *array = container_of(map, struct bpf_array, map);
+ struct perf_event *event;
+
+ if (unlikely(index >= array->map.max_entries))
+ return -E2BIG;
+
+ event = (struct perf_event *)array->ptrs[index];
+ if (!event)
+ return -ENOENT;
+
+	if (flag)
+		atomic_dec(&event->sample_disable);
+	else
+		atomic_inc(&event->sample_disable);
+
+	return 0;
+}
+
+const struct bpf_func_proto bpf_perf_event_sample_control_proto = {
+	.func		= bpf_perf_event_sample_control,
+	.gpl_only	= false,
+	.ret_type	= RET_INTEGER,
+	.arg1_type	= ARG_CONST_MAP_PTR,
+	.arg2_type	= ARG_ANYTHING,
+	.arg3_type	= ARG_ANYTHING,
+};
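
Programs can only call the helper once its proto is returned from the
tracing func_proto lookup and a helper id exists in the uapi enum;
neither is shown in this excerpt. A minimal sketch of that wiring,
with the function name chosen here and BPF_FUNC_perf_event_sample_control
assumed to be added elsewhere in this series:

/* Sketch only: hook the new proto into the kprobe program helper lookup. */
static const struct bpf_func_proto *
sample_control_func_proto(enum bpf_func_id func_id)
{
	switch (func_id) {
	case BPF_FUNC_perf_event_sample_control:
		return &bpf_perf_event_sample_control_proto;
	default:
		return NULL;	/* fall back to the generic lookup in practice */
	}
}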