[PATCH] perf-stat: skip evlist__[enable|disable] when all events use BPF

From: Song Liu
Date: Wed Apr 28 2021 - 20:41:28 EST


When all events of a perf-stat session use BPF, it is not necessary to
call evlist__enable() and evlist__disable(). Skip them when
all_counters_use_bpf is true. Since evlist__disable() may now be
skipped, move the bpf_counter__disable() calls out of
__evlist__disable() and into disable_counters(), so that BPF counters
are still stopped before their values are read.
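
For reference, this is roughly how the flag can be derived. The helper
below is only a sketch, not part of this patch: the name
evlist__all_counters_use_bpf() is hypothetical, and it assumes the
existing evsel__is_bpf() check from util/evsel.h:

static bool evlist__all_counters_use_bpf(struct evlist *evlist)
{
	struct evsel *counter;

	/* Only true when every event is backed by a BPF counter. */
	evlist__for_each_entry(evlist, counter) {
		if (!evsel__is_bpf(counter))
			return false;
	}
	return true;
}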

Signed-off-by: Song Liu <song@xxxxxxxxxx>
---
 tools/perf/builtin-stat.c | 12 +++++++++---
 tools/perf/util/evlist.c  |  3 ---
 2 files changed, 9 insertions(+), 6 deletions(-)
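
Note (below the fold, not part of the commit message): the all-BPF path
is exercised when every event is opened with a BPF counter, e.g.,
assuming the --bpf-counters option added earlier in this series:

  # perf stat --bpf-counters -e cycles,instructions -a -- sleep 1

With this change such a session skips the ioctl-driven
evlist__enable()/evlist__disable() round trip; the counters are stopped
through bpf_counter__disable() instead.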

diff --git a/tools/perf/builtin-stat.c b/tools/perf/builtin-stat.c
index 5a830ae09418e..44459e0352fda 100644
--- a/tools/perf/builtin-stat.c
+++ b/tools/perf/builtin-stat.c
@@ -572,7 +572,8 @@ static int enable_counters(void)
 	 * - we have initial delay configured
 	 */
 	if (!target__none(&target) || stat_config.initial_delay) {
-		evlist__enable(evsel_list);
+		if (!all_counters_use_bpf)
+			evlist__enable(evsel_list);
 		if (stat_config.initial_delay > 0)
 			pr_info(EVLIST_ENABLED_MSG);
 	}
@@ -581,13 +582,18 @@ static int enable_counters(void)

 static void disable_counters(void)
 {
+	struct evsel *counter;
 	/*
 	 * If we don't have tracee (attaching to task or cpu), counters may
 	 * still be running. To get accurate group ratios, we must stop groups
 	 * from counting before reading their constituent counters.
 	 */
-	if (!target__none(&target))
-		evlist__disable(evsel_list);
+	if (!target__none(&target)) {
+		evlist__for_each_entry(evsel_list, counter)
+			bpf_counter__disable(counter);
+		if (!all_counters_use_bpf)
+			evlist__disable(evsel_list);
+	}
 }
 
 static volatile int workload_exec_errno;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index 6e5c41528c7d0..6ea3e677dc1e7 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -425,9 +425,6 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
 	if (affinity__setup(&affinity) < 0)
 		return;
 
-	evlist__for_each_entry(evlist, pos)
-		bpf_counter__disable(pos);
-
 	/* Disable 'immediate' events last */
 	for (imm = 0; imm <= 1; imm++) {
 		evlist__for_each_cpu(evlist, i, cpu) {
--
2.30.2
