[PATCH 2/3] perf tools: Add perf_env__numa_node function

From: Jiri Olsa
Date: Wed Sep 04 2019 - 03:34:26 EST


To speed up the cpu to node lookup, add a perf_env__numa_node function
that builds a cpu-indexed array on the first lookup, holding the numa
node for each stored cpu.
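
For illustration, a caller could resolve a cpu to its node like this
(a hypothetical sketch, not part of this patch; the print_cpu_node
helper and the plain printf output are made up for the example):

  #include <stdio.h>
  #include "util/env.h"

  static void print_cpu_node(struct perf_env *env, int cpu)
  {
          /* Builds the cpu -> node array lazily on the first call. */
          int node = perf_env__numa_node(env, cpu);

          if (node == -1)
                  printf("cpu %d: numa node unknown\n", cpu);
          else
                  printf("cpu %d: numa node %d\n", cpu, node);
  }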

Link: http://lkml.kernel.org/n/tip-qqwxklhissf3yjyuaszh6480@xxxxxxxxxxxxxx
Signed-off-by: Jiri Olsa <jolsa@xxxxxxxxxx>
---
tools/perf/util/env.c | 40 ++++++++++++++++++++++++++++++++++++++++
tools/perf/util/env.h | 6 ++++++
2 files changed, 46 insertions(+)

diff --git a/tools/perf/util/env.c b/tools/perf/util/env.c
index 3baca06786fb..ee53e89a9535 100644
--- a/tools/perf/util/env.c
+++ b/tools/perf/util/env.c
@@ -179,6 +179,7 @@ void perf_env__exit(struct perf_env *env)
 	zfree(&env->sibling_threads);
 	zfree(&env->pmu_mappings);
 	zfree(&env->cpu);
+	zfree(&env->numa_map);
 
 	for (i = 0; i < env->nr_numa_nodes; i++)
 		perf_cpu_map__put(env->numa_nodes[i].map);
@@ -338,3 +339,42 @@ const char *perf_env__arch(struct perf_env *env)
 
 	return normalize_arch(arch_name);
 }
+
+
+int perf_env__numa_node(struct perf_env *env, int cpu)
+{
+	if (!env->nr_numa_map) {
+		struct numa_node *nn;
+		int i, nr = 0;
+
+		for (i = 0; i < env->nr_numa_nodes; i++) {
+			nn = &env->numa_nodes[i];
+			nr = max(nr, perf_cpu_map__max(nn->map));
+		}
+
+		nr++;
+
+		/*
+		 * We initialize the numa_map array to prepare
+		 * it for missing cpus, which return node -1.
+		 */
+		env->numa_map = malloc(nr * sizeof(int));
+		if (!env->numa_map)
+			return -1;
+
+		for (i = 0; i < nr; i++)
+			env->numa_map[i] = -1;
+
+		env->nr_numa_map = nr;
+
+		for (i = 0; i < env->nr_numa_nodes; i++) {
+			int tmp, j;
+
+			nn = &env->numa_nodes[i];
+			perf_cpu_map__for_each_cpu(j, tmp, nn->map)
+				env->numa_map[j] = i;
+		}
+	}
+
+	return cpu >= 0 && cpu < env->nr_numa_map ? env->numa_map[cpu] : -1;
+}
diff --git a/tools/perf/util/env.h b/tools/perf/util/env.h
index d8e083d42610..777008f8007a 100644
--- a/tools/perf/util/env.h
+++ b/tools/perf/util/env.h
@@ -86,6 +86,10 @@ struct perf_env {
 		struct rb_root btfs;
 		u32 btfs_cnt;
 	} bpf_progs;
+
+	/* For fast cpu to numa node lookup via perf_env__numa_node */
+	int *numa_map;
+	int nr_numa_map;
 };

enum perf_compress_type {
@@ -118,4 +122,6 @@ struct bpf_prog_info_node *perf_env__find_bpf_prog_info(struct perf_env *env,
							__u32 prog_id);
 void perf_env__insert_btf(struct perf_env *env, struct btf_node *btf_node);
 struct btf_node *perf_env__find_btf(struct perf_env *env, __u32 btf_id);
+
+int perf_env__numa_node(struct perf_env *env, int cpu);
 #endif /* __PERF_ENV_H */
--
2.21.0