[PATCH bpf-next v1 01/13] bpf: Refactor BPF_EVENT context macros into their own header

From: KP Singh
Date: Fri Dec 20 2019 - 10:42:10 EST


From: KP Singh <kpsingh@xxxxxxxxxx>

These macros are useful for program types other than tracing,
e.g. KRSI (an upcoming BPF-based LSM), which does not use
BPF_PROG_TYPE_TRACING but uses verifiable BTF accesses similar
to raw tracepoints.

Signed-off-by: KP Singh <kpsingh@xxxxxxxxxx>
---
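Note for reviewers (not part of the commit message): a rough sketch of
how the moved macros compose. The bpf_trace_run2() body below is a
hand-expanded, simplified rendering of BPF_TRACE_DEFN_x(2) from
kernel/trace/bpf_trace.c, shown only to illustrate the expansions, not
verbatim kernel code.

	/*
	 * REPEAT(2, SARG, __DL_COM, __SEQ_0_11) expands to the parameter
	 * list "u64 arg0, u64 arg1"; REPEAT(2, COPY, __DL_SEM, __SEQ_0_11)
	 * expands to "args[0] = arg0; args[1] = arg1".
	 */
	void bpf_trace_run2(struct bpf_prog *prog, u64 arg0, u64 arg1)
	{
		u64 args[2];

		args[0] = arg0;
		args[1] = arg1;
		__bpf_trace_run(prog, args);
	}

	/*
	 * Similarly, CAST_TO_U64(a, b) expands to
	 * __CAST_TO_U64(a), __CAST_TO_U64(b); each argument is memcpy()'d
	 * into a same-sized unsigned integer (via UINTTYPE(sizeof(x)))
	 * and then widened to u64.
	 */
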
include/linux/bpf_event.h | 69 ++++++++++++++++++++++++++++++++++++++
include/trace/bpf_probe.h | 30 +--------------
kernel/trace/bpf_trace.c | 24 +-----------
3 files changed, 72 insertions(+), 51 deletions(-)
create mode 100644 include/linux/bpf_event.h

diff --git a/include/linux/bpf_event.h b/include/linux/bpf_event.h
new file mode 100644
index 000000000000..353eb1f5a3d0
--- /dev/null
+++ b/include/linux/bpf_event.h
@@ -0,0 +1,69 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+
+/*
+ * Copyright (c) 2018 Facebook
+ * Copyright 2019 Google LLC.
+ */
+
+#ifndef _LINUX_BPF_EVENT_H
+#define _LINUX_BPF_EVENT_H
+
+#ifdef CONFIG_BPF_EVENTS
+
+/* cast any integer, pointer, or small struct to u64 */
+#define UINTTYPE(size) \
+ __typeof__(__builtin_choose_expr(size == 1, (u8)1, \
+ __builtin_choose_expr(size == 2, (u16)2, \
+ __builtin_choose_expr(size == 4, (u32)3, \
+ __builtin_choose_expr(size == 8, (u64)4, \
+ (void)5)))))
+#define __CAST_TO_U64(x) ({ \
+ typeof(x) __src = (x); \
+ UINTTYPE(sizeof(x)) __dst; \
+ memcpy(&__dst, &__src, sizeof(__dst)); \
+ (u64)__dst; })
+
+#define __CAST0(...) 0
+#define __CAST1(a, ...) __CAST_TO_U64(a)
+#define __CAST2(a, ...) __CAST_TO_U64(a), __CAST1(__VA_ARGS__)
+#define __CAST3(a, ...) __CAST_TO_U64(a), __CAST2(__VA_ARGS__)
+#define __CAST4(a, ...) __CAST_TO_U64(a), __CAST3(__VA_ARGS__)
+#define __CAST5(a, ...) __CAST_TO_U64(a), __CAST4(__VA_ARGS__)
+#define __CAST6(a, ...) __CAST_TO_U64(a), __CAST5(__VA_ARGS__)
+#define __CAST7(a, ...) __CAST_TO_U64(a), __CAST6(__VA_ARGS__)
+#define __CAST8(a, ...) __CAST_TO_U64(a), __CAST7(__VA_ARGS__)
+#define __CAST9(a, ...) __CAST_TO_U64(a), __CAST8(__VA_ARGS__)
+#define __CAST10(a, ...) __CAST_TO_U64(a), __CAST9(__VA_ARGS__)
+#define __CAST11(a, ...) __CAST_TO_U64(a), __CAST10(__VA_ARGS__)
+#define __CAST12(a, ...) __CAST_TO_U64(a), __CAST11(__VA_ARGS__)
+/* tracepoints with more than 12 arguments will hit build error */
+#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
+
+#define UNPACK(...) __VA_ARGS__
+#define REPEAT_1(FN, DL, X, ...) FN(X)
+#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
+#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
+#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
+#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
+#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
+#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
+#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
+#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
+#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
+#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
+#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
+#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
+
+#define SARG(X) u64 arg##X
+#ifdef COPY
+#undef COPY
+#endif
+
+#define COPY(X) args[X] = arg##X
+#define __DL_COM (,)
+#define __DL_SEM (;)
+
+#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
+
+#endif
+#endif /* _LINUX_BPF_EVENT_H */
diff --git a/include/trace/bpf_probe.h b/include/trace/bpf_probe.h
index b04c29270973..5165dbc66098 100644
--- a/include/trace/bpf_probe.h
+++ b/include/trace/bpf_probe.h
@@ -1,5 +1,7 @@
/* SPDX-License-Identifier: GPL-2.0 */

+#include <linux/bpf_event.h>
+
#undef TRACE_SYSTEM_VAR

#ifdef CONFIG_BPF_EVENTS
@@ -27,34 +29,6 @@
#undef __perf_task
#define __perf_task(t) (t)

-/* cast any integer, pointer, or small struct to u64 */
-#define UINTTYPE(size) \
- __typeof__(__builtin_choose_expr(size == 1, (u8)1, \
- __builtin_choose_expr(size == 2, (u16)2, \
- __builtin_choose_expr(size == 4, (u32)3, \
- __builtin_choose_expr(size == 8, (u64)4, \
- (void)5)))))
-#define __CAST_TO_U64(x) ({ \
- typeof(x) __src = (x); \
- UINTTYPE(sizeof(x)) __dst; \
- memcpy(&__dst, &__src, sizeof(__dst)); \
- (u64)__dst; })
-
-#define __CAST1(a,...) __CAST_TO_U64(a)
-#define __CAST2(a,...) __CAST_TO_U64(a), __CAST1(__VA_ARGS__)
-#define __CAST3(a,...) __CAST_TO_U64(a), __CAST2(__VA_ARGS__)
-#define __CAST4(a,...) __CAST_TO_U64(a), __CAST3(__VA_ARGS__)
-#define __CAST5(a,...) __CAST_TO_U64(a), __CAST4(__VA_ARGS__)
-#define __CAST6(a,...) __CAST_TO_U64(a), __CAST5(__VA_ARGS__)
-#define __CAST7(a,...) __CAST_TO_U64(a), __CAST6(__VA_ARGS__)
-#define __CAST8(a,...) __CAST_TO_U64(a), __CAST7(__VA_ARGS__)
-#define __CAST9(a,...) __CAST_TO_U64(a), __CAST8(__VA_ARGS__)
-#define __CAST10(a,...) __CAST_TO_U64(a), __CAST9(__VA_ARGS__)
-#define __CAST11(a,...) __CAST_TO_U64(a), __CAST10(__VA_ARGS__)
-#define __CAST12(a,...) __CAST_TO_U64(a), __CAST11(__VA_ARGS__)
-/* tracepoints with more than 12 arguments will hit build error */
-#define CAST_TO_U64(...) CONCATENATE(__CAST, COUNT_ARGS(__VA_ARGS__))(__VA_ARGS__)
-
#undef DECLARE_EVENT_CLASS
#define DECLARE_EVENT_CLASS(call, proto, args, tstruct, assign, print) \
static notrace void \
diff --git a/kernel/trace/bpf_trace.c b/kernel/trace/bpf_trace.c
index ffc91d4935ac..3fb02fe799ab 100644
--- a/kernel/trace/bpf_trace.c
+++ b/kernel/trace/bpf_trace.c
@@ -7,6 +7,7 @@
#include <linux/slab.h>
#include <linux/bpf.h>
#include <linux/bpf_perf_event.h>
+#include <linux/bpf_event.h>
#include <linux/filter.h>
#include <linux/uaccess.h>
#include <linux/ctype.h>
@@ -1461,29 +1462,6 @@ void __bpf_trace_run(struct bpf_prog *prog, u64 *args)
rcu_read_unlock();
}

-#define UNPACK(...) __VA_ARGS__
-#define REPEAT_1(FN, DL, X, ...) FN(X)
-#define REPEAT_2(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_1(FN, DL, __VA_ARGS__)
-#define REPEAT_3(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_2(FN, DL, __VA_ARGS__)
-#define REPEAT_4(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_3(FN, DL, __VA_ARGS__)
-#define REPEAT_5(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_4(FN, DL, __VA_ARGS__)
-#define REPEAT_6(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_5(FN, DL, __VA_ARGS__)
-#define REPEAT_7(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_6(FN, DL, __VA_ARGS__)
-#define REPEAT_8(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_7(FN, DL, __VA_ARGS__)
-#define REPEAT_9(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_8(FN, DL, __VA_ARGS__)
-#define REPEAT_10(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_9(FN, DL, __VA_ARGS__)
-#define REPEAT_11(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_10(FN, DL, __VA_ARGS__)
-#define REPEAT_12(FN, DL, X, ...) FN(X) UNPACK DL REPEAT_11(FN, DL, __VA_ARGS__)
-#define REPEAT(X, FN, DL, ...) REPEAT_##X(FN, DL, __VA_ARGS__)
-
-#define SARG(X) u64 arg##X
-#define COPY(X) args[X] = arg##X
-
-#define __DL_COM (,)
-#define __DL_SEM (;)
-
-#define __SEQ_0_11 0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11
-
#define BPF_TRACE_DEFN_x(x) \
void bpf_trace_run##x(struct bpf_prog *prog, \
REPEAT(x, SARG, __DL_COM, __SEQ_0_11)) \
--
2.20.1