[PATCH v5 5/5] perf-stat: introduce bpf_counter_ops->disable()

From: Song Liu
Date: Sun Apr 25 2021 - 17:44:41 EST


Introduce bpf_counter_ops->disable(), which is used to stop counting the
event.
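
As a rough illustration (not part of the patch): the new hook is just
another callback in the per-evsel ops table, dispatched the same way as
enable/read. The stand-alone sketch below stubs struct evsel and the ops
table down to the minimum so it compiles on its own; demo_disable,
demo_ops and the "cycles" evsel are made up for the example, only the
bpf_counter__disable()/ops->disable() shape mirrors the patch.

#include <stdio.h>

struct evsel;				/* stand-in for perf's struct evsel */

struct bpf_counter_ops {
	int (*enable)(struct evsel *evsel);
	int (*disable)(struct evsel *evsel);	/* the op this patch adds */
	int (*read)(struct evsel *evsel);
};

struct evsel {
	const char *name;
	struct bpf_counter_ops *bpf_counter_ops;	/* NULL if no BPF counter */
};

/* made-up backend; the real bperf__disable() clears follower_skel->bss->enabled */
static int demo_disable(struct evsel *evsel)
{
	printf("stop counting %s\n", evsel->name);
	return 0;
}

static struct bpf_counter_ops demo_ops = {
	.disable = demo_disable,
};

/* same shape as bpf_counter__disable(): skip evsels without a BPF counter */
static int bpf_counter__disable(struct evsel *evsel)
{
	if (!evsel->bpf_counter_ops)
		return 0;
	return evsel->bpf_counter_ops->disable(evsel);
}

int main(void)
{
	struct evsel ev = { .name = "cycles", .bpf_counter_ops = &demo_ops };

	/* __evlist__disable() walks the evlist and makes this call per evsel */
	return bpf_counter__disable(&ev);
}

In the patch itself the caller is __evlist__disable() in
tools/perf/util/evlist.c, which now invokes bpf_counter__disable() for
every evsel before running the regular per-CPU disable loop.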

Signed-off-by: Song Liu <song@xxxxxxxxxx>
---
tools/perf/util/bpf_counter.c | 26 ++++++++++++++++++++++++++
tools/perf/util/bpf_counter.h | 7 +++++++
tools/perf/util/evlist.c | 4 ++++
3 files changed, 37 insertions(+)

diff --git a/tools/perf/util/bpf_counter.c b/tools/perf/util/bpf_counter.c
index f179f57430253..ddb52f748c8e8 100644
--- a/tools/perf/util/bpf_counter.c
+++ b/tools/perf/util/bpf_counter.c
@@ -215,6 +215,17 @@ static int bpf_program_profiler__enable(struct evsel *evsel)
return 0;
}

+static int bpf_program_profiler__disable(struct evsel *evsel)
+{
+	struct bpf_counter *counter;
+
+	list_for_each_entry(counter, &evsel->bpf_counter_list, list) {
+		assert(counter->skel != NULL);
+		bpf_prog_profiler_bpf__detach(counter->skel);
+	}
+	return 0;
+}
+
static int bpf_program_profiler__read(struct evsel *evsel)
{
// perf_cpu_map uses /sys/devices/system/cpu/online
@@ -280,6 +291,7 @@ static int bpf_program_profiler__install_pe(struct evsel *evsel, int cpu,
struct bpf_counter_ops bpf_program_profiler_ops = {
.load = bpf_program_profiler__load,
.enable = bpf_program_profiler__enable,
+ .disable = bpf_program_profiler__disable,
.read = bpf_program_profiler__read,
.destroy = bpf_program_profiler__destroy,
.install_pe = bpf_program_profiler__install_pe,
@@ -627,6 +639,12 @@ static int bperf__enable(struct evsel *evsel)
return 0;
}

+static int bperf__disable(struct evsel *evsel)
+{
+	evsel->follower_skel->bss->enabled = 0;
+	return 0;
+}
+
static int bperf__read(struct evsel *evsel)
{
struct bperf_follower_bpf *skel = evsel->follower_skel;
@@ -768,6 +786,7 @@ static int bperf__destroy(struct evsel *evsel)
struct bpf_counter_ops bperf_ops = {
.load = bperf__load,
.enable = bperf__enable,
+ .disable = bperf__disable,
.read = bperf__read,
.install_pe = bperf__install_pe,
.destroy = bperf__destroy,
@@ -806,6 +825,13 @@ int bpf_counter__enable(struct evsel *evsel)
return evsel->bpf_counter_ops->enable(evsel);
}

+int bpf_counter__disable(struct evsel *evsel)
+{
+	if (bpf_counter_skip(evsel))
+		return 0;
+	return evsel->bpf_counter_ops->disable(evsel);
+}
+
int bpf_counter__read(struct evsel *evsel)
{
if (bpf_counter_skip(evsel))
diff --git a/tools/perf/util/bpf_counter.h b/tools/perf/util/bpf_counter.h
index cb9c532e0a079..d6d907c3dcf92 100644
--- a/tools/perf/util/bpf_counter.h
+++ b/tools/perf/util/bpf_counter.h
@@ -18,6 +18,7 @@ typedef int (*bpf_counter_evsel_install_pe_op)(struct evsel *evsel,
struct bpf_counter_ops {
bpf_counter_evsel_target_op load;
bpf_counter_evsel_op enable;
+ bpf_counter_evsel_op disable;
bpf_counter_evsel_op read;
bpf_counter_evsel_op destroy;
bpf_counter_evsel_install_pe_op install_pe;
@@ -32,6 +33,7 @@ struct bpf_counter {

int bpf_counter__load(struct evsel *evsel, struct target *target);
int bpf_counter__enable(struct evsel *evsel);
+int bpf_counter__disable(struct evsel *evsel);
int bpf_counter__read(struct evsel *evsel);
void bpf_counter__destroy(struct evsel *evsel);
int bpf_counter__install_pe(struct evsel *evsel, int cpu, int fd);
@@ -51,6 +53,11 @@ static inline int bpf_counter__enable(struct evsel *evsel __maybe_unused)
return 0;
}

+static inline int bpf_counter__disable(struct evsel *evsel __maybe_unused)
+{
+	return 0;
+}
+
static inline int bpf_counter__read(struct evsel *evsel __maybe_unused)
{
return -EAGAIN;
diff --git a/tools/perf/util/evlist.c b/tools/perf/util/evlist.c
index d29a8a118973c..e71041c890102 100644
--- a/tools/perf/util/evlist.c
+++ b/tools/perf/util/evlist.c
@@ -17,6 +17,7 @@
#include "evsel.h"
#include "debug.h"
#include "units.h"
+#include "bpf_counter.h"
#include <internal/lib.h> // page_size
#include "affinity.h"
#include "../perf.h"
@@ -421,6 +422,9 @@ static void __evlist__disable(struct evlist *evlist, char *evsel_name)
if (affinity__setup(&affinity) < 0)
return;

+	evlist__for_each_entry(evlist, pos)
+		bpf_counter__disable(pos);
+
/* Disable 'immediate' events last */
for (imm = 0; imm <= 1; imm++) {
evlist__for_each_cpu(evlist, i, cpu) {
--
2.30.2