[PATCH v4 perf,bpf 06/15] perf, bpf: save bpf_prog_info in an rbtree in perf_env
From: Song Liu
Date: Mon Feb 25 2019 - 19:22:22 EST
struct bpf_prog_info contains the information needed to annotate BPF programs.
This patch saves bpf_prog_info for the BPF programs loaded in the system in an
rbtree in perf_env, keyed by program id, so that later annotation code can look
the information up by id.
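For example, once the information is saved, a consumer that knows a BPF program
id can fetch it roughly as follows (a sketch only; annotate_bpf_prog() is a
hypothetical caller and not part of this series):

	#include "util/env.h"		/* struct perf_env, perf_env__find_bpf_prog_info() */
	#include "util/bpf-event.h"	/* struct bpf_prog_info_node */

	/* Hypothetical consumer: look up the saved bpf_prog_info by prog id. */
	static int annotate_bpf_prog(struct perf_env *env, __u32 prog_id)
	{
		struct bpf_prog_info_node *node;

		node = perf_env__find_bpf_prog_info(env, prog_id);
		if (!node)
			return -1;	/* no info was synthesized for this prog */

		/* node->info_linear->info carries jited ksyms, line info, etc. */
		return 0;
	}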
Signed-off-by: Song Liu <songliubraving@xxxxxx>
---
tools/perf/util/bpf-event.c | 32 +++++++++++++-
tools/perf/util/bpf-event.h | 7 ++-
tools/perf/util/env.c | 85 +++++++++++++++++++++++++++++++++++++
tools/perf/util/env.h | 17 ++++++++
4 files changed, 139 insertions(+), 2 deletions(-)
diff --git a/tools/perf/util/bpf-event.c b/tools/perf/util/bpf-event.c
index ff7ee149ec46..ce81b2c43a51 100644
--- a/tools/perf/util/bpf-event.c
+++ b/tools/perf/util/bpf-event.c
@@ -10,6 +10,7 @@
#include "debug.h"
#include "symbol.h"
#include "machine.h"
+#include "env.h"
#include "session.h"
#define ptr_to_u64(ptr) ((__u64)(unsigned long)(ptr))
@@ -54,17 +55,28 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
struct bpf_event *bpf_event = &event->bpf_event;
struct bpf_prog_info_linear *info_linear;
struct perf_tool *tool = session->tool;
+ struct bpf_prog_info_node *info_node;
struct bpf_prog_info *info;
struct btf *btf = NULL;
bool has_btf = false;
+ struct perf_env *env;
u32 sub_prog_cnt, i;
int err = 0;
u64 arrays;
+ /*
+ * For perf-record and perf-report, use header.env;
+ * otherwise, use the global perf_env.
+ */
+ env = session->data ? &session->header.env : &perf_env;
+
arrays = 1UL << BPF_PROG_INFO_JITED_KSYMS;
arrays |= 1UL << BPF_PROG_INFO_JITED_FUNC_LENS;
arrays |= 1UL << BPF_PROG_INFO_FUNC_INFO;
arrays |= 1UL << BPF_PROG_INFO_PROG_TAGS;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_INSNS;
+ arrays |= 1UL << BPF_PROG_INFO_LINE_INFO;
+ arrays |= 1UL << BPF_PROG_INFO_JITED_LINE_INFO;
info_linear = bpf_program__get_prog_info_linear(fd, arrays);
if (IS_ERR_OR_NULL(info_linear)) {
@@ -153,8 +165,8 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
machine, process);
}
- /* Synthesize PERF_RECORD_BPF_EVENT */
if (opts->bpf_event) {
+ /* Synthesize PERF_RECORD_BPF_EVENT */
*bpf_event = (struct bpf_event){
.header = {
.type = PERF_RECORD_BPF_EVENT,
@@ -167,6 +179,24 @@ static int perf_event__synthesize_one_bpf_prog(struct perf_session *session,
memcpy(bpf_event->tag, info->tag, BPF_TAG_SIZE);
memset((void *)event + event->header.size, 0, machine->id_hdr_size);
event->header.size += machine->id_hdr_size;
+
+ /* save bpf_prog_info to env */
+ info_node = malloc(sizeof(struct bpf_prog_info_node));
+
+ /*
+ * Do not bail out if the allocation fails; we still want to
+ * call perf_tool__process_synth_event().
+ */
+ if (info_node) {
+ info_node->info_linear = info_linear;
+ perf_env__insert_bpf_prog_info(env, info_node);
+ info_linear = NULL;
+ }
+
+ /*
+ * Process the event after saving bpf_prog_info to env, so that
+ * the required information is available for lookup.
+ */
err = perf_tool__process_synth_event(tool, event,
machine, process);
}
diff --git a/tools/perf/util/bpf-event.h b/tools/perf/util/bpf-event.h
index 6698683612a7..fad932f7404f 100644
--- a/tools/perf/util/bpf-event.h
+++ b/tools/perf/util/bpf-event.h
@@ -3,14 +3,19 @@
#define __PERF_BPF_EVENT_H
#include <linux/compiler.h>
+#include <linux/rbtree.h>
#include "event.h"
struct machine;
union perf_event;
struct perf_sample;
-struct perf_tool;
struct record_opts;
+struct bpf_prog_info_node {
+ struct bpf_prog_info_linear *info_linear;
+ struct rb_node rb_node;
+};
+
#ifdef HAVE_LIBBPF_SUPPORT
int machine__process_bpf_event(struct machine *machine, union perf_event *event,
struct perf_sample *sample);
diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 4c23779e271a..650c14ad7e9b 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -3,15 +3,93 @@
#include "env.h"
#include "sane_ctype.h"
#include "util.h"
+#include "bpf-event.h"
#include <errno.h>
#include <sys/utsname.h>
+#include <bpf/libbpf.h>
struct perf_env perf_env;
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node)
+{
+ __u32 prog_id = info_node->info_linear->info.id;
+ struct bpf_prog_info_node *node;
+ struct rb_node *parent = NULL;
+ struct rb_node **p;
+
+ down_write(&env->bpf_progs.lock);
+ p = &env->bpf_progs.prog_infos.rb_node;
+
+ while (*p != NULL) {
+ parent = *p;
+ node = rb_entry(parent, struct bpf_prog_info_node, rb_node);
+ if (prog_id < node->info_linear->info.id) {
+ p = &(*p)->rb_left;
+ } else if (prog_id > node->info_linear->info.id) {
+ p = &(*p)->rb_right;
+ } else {
+ pr_debug("duplicated bpf prog info %u\n", prog_id);
+ goto out;
+ }
+ }
+
+ rb_link_node(&info_node->rb_node, parent, p);
+ rb_insert_color(&info_node->rb_node, &env->bpf_progs.prog_infos);
+out:
+ up_write(&env->bpf_progs.lock);
+}
+
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id)
+{
+ struct bpf_prog_info_node *node = NULL;
+ struct rb_node *n;
+
+ down_read(&env->bpf_progs.lock);
+ n = env->bpf_progs.prog_infos.rb_node;
+
+ while (n) {
+ node = rb_entry(n, struct bpf_prog_info_node, rb_node);
+ if (prog_id < node->info_linear->info.id)
+ n = n->rb_left;
+ else if (prog_id > node->info_linear->info.id)
+ n = n->rb_right;
+ else
+ break;
+ }
+
+ up_read(&env->bpf_progs.lock);
+ return node;
+}
+
+/* purge data in bpf_prog_infos tree */
+static void perf_env__purge_bpf(struct perf_env *env)
+{
+ struct rb_root *root;
+ struct rb_node *next;
+
+ down_write(&env->bpf_progs.lock);
+
+ root = &env->bpf_progs.prog_infos;
+ next = rb_first(root);
+
+ while (next) {
+ struct bpf_prog_info_node *node;
+
+ node = rb_entry(next, struct bpf_prog_info_node, rb_node);
+ next = rb_next(&node->rb_node);
+ rb_erase_init(&node->rb_node, root);
+ free(node);
+ }
+ up_write(&env->bpf_progs.lock);
+}
+
void perf_env__exit(struct perf_env *env)
{
int i;
+ perf_env__purge_bpf(env);
zfree(&env->hostname);
zfree(&env->os_release);
zfree(&env->version);
@@ -38,6 +116,12 @@ void perf_env__exit(struct perf_env *env)
zfree(&env->memory_nodes);
}
+static void init_bpf_rb_trees(struct perf_env *env)
+{
+ env->bpf_progs.prog_infos = RB_ROOT;
+ init_rwsem(&env->bpf_progs.lock);
+}
+
int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
{
int i;
@@ -59,6 +143,7 @@ int perf_env__set_cmdline(struct perf_env *env, int argc, const char *argv[])
env->nr_cmdline = argc;
+ init_bpf_rb_trees(env);
return 0;
out_free:
zfree(&env->cmdline_argv);
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d01b8355f4ca..33ef4b2d2a29 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -3,7 +3,9 @@
#define __PERF_ENV_H
#include <linux/types.h>
+#include <linux/rbtree.h>
#include "cpumap.h"
+#include "rwsem.h"
struct cpu_topology_map {
int socket_id;
@@ -64,8 +66,19 @@ struct perf_env {
struct memory_node *memory_nodes;
unsigned long long memory_bsize;
u64 clockid_res_ns;
+
+ /*
+ * bpf_progs.lock protects the bpf rbtrees. This is needed because the
+ * trees are accessed by different threads in perf-top.
+ */
+ struct {
+ struct rw_semaphore lock;
+ struct rb_root prog_infos;
+ } bpf_progs;
};
+struct bpf_prog_info_node;
+
extern struct perf_env perf_env;
void perf_env__exit(struct perf_env *env);
@@ -80,4 +93,8 @@ const char *perf_env__arch(struct perf_env *env);
const char *perf_env__raw_arch(struct perf_env *env);
int perf_env__nr_cpus_avail(struct perf_env *env);
+void perf_env__insert_bpf_prog_info(struct perf_env *env,
+ struct bpf_prog_info_node *info_node);
+struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
+ __u32 prog_id);
#endif /* __PERF_ENV_H */
--
2.17.1