[PATCH v1 3/5] perf tools: Check if mem_events is supported for hybrid

From: Jin Yao
Date: Thu May 20 2021 - 03:02:18 EST


Check if the mem_events ('mem-loads' and 'mem-stores') exist
in the sysfs path.

For Alderlake, the hybrid CPU PMUs are "cpu_core" and "cpu_atom".
Check the existence of the following paths:
/sys/devices/cpu_atom/events/mem-loads
/sys/devices/cpu_atom/events/mem-stores
/sys/devices/cpu_core/events/mem-loads
/sys/devices/cpu_core/events/mem-stores

If the path exists, the mem_event is supported.
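
For reference, a minimal standalone sketch (not part of the patch) of the
sysfs existence check described above. The PMU and event names are
hard-coded here purely for illustration; the patch below builds the sysfs
names from the perf_mem_events table and, on hybrid systems, walks the
hybrid PMU list instead.

    /* sketch only: hard-coded PMU/event names, not the patch code */
    #include <stdio.h>
    #include <limits.h>     /* PATH_MAX */
    #include <sys/stat.h>

    static int mem_event_supported(const char *pmu, const char *event)
    {
            char path[PATH_MAX];
            struct stat st;

            /* e.g. /sys/devices/cpu_core/events/mem-loads */
            snprintf(path, sizeof(path), "/sys/devices/%s/events/%s",
                     pmu, event);
            return stat(path, &st) == 0;
    }

    int main(void)
    {
            const char *pmus[]   = { "cpu_core", "cpu_atom" };
            const char *events[] = { "mem-loads", "mem-stores" };

            for (unsigned int i = 0; i < 2; i++)
                    for (unsigned int j = 0; j < 2; j++)
                            printf("%s/%s: %s\n", pmus[i], events[j],
                                   mem_event_supported(pmus[i], events[j]) ?
                                   "supported" : "not supported");
            return 0;
    }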

Signed-off-by: Jin Yao <yao.jin@xxxxxxxxxxxxxxx>
---
tools/perf/util/mem-events.c | 43 +++++++++++++++++++++++++++++-------
1 file changed, 35 insertions(+), 8 deletions(-)

diff --git a/tools/perf/util/mem-events.c b/tools/perf/util/mem-events.c
index c736eaded06c..e8f6e745eaf0 100644
--- a/tools/perf/util/mem-events.c
+++ b/tools/perf/util/mem-events.c
@@ -12,14 +12,16 @@
#include "mem-events.h"
#include "debug.h"
#include "symbol.h"
+#include "pmu.h"
+#include "pmu-hybrid.h"

unsigned int perf_mem_events__loads_ldlat = 30;

#define E(t, n, s) { .tag = t, .name = n, .sysfs_name = s }

static struct perf_mem_event perf_mem_events[PERF_MEM_EVENTS__MAX] = {
- E("ldlat-loads", "cpu/mem-loads,ldlat=%u/P", "cpu/events/mem-loads"),
- E("ldlat-stores", "cpu/mem-stores/P", "cpu/events/mem-stores"),
+ E("ldlat-loads", "%s/mem-loads,ldlat=%u/P", "%s/events/mem-loads"),
+ E("ldlat-stores", "%s/mem-stores/P", "%s/events/mem-stores"),
E(NULL, NULL, NULL),
};
#undef E
@@ -100,6 +102,18 @@ int perf_mem_events__parse(const char *str)
return -1;
}

+static bool perf_mem_events__supported(const char *mnt, char *sysfs_name)
+{
+ char path[PATH_MAX];
+ struct stat st;
+
+ scnprintf(path, PATH_MAX, "%s/devices/%s", mnt, sysfs_name);
+ if (!stat(path, &st))
+ return true;
+
+ return false;
+}
+
int perf_mem_events__init(void)
{
const char *mnt = sysfs__mount();
@@ -110,9 +124,10 @@ int perf_mem_events__init(void)
return -ENOENT;

for (j = 0; j < PERF_MEM_EVENTS__MAX; j++) {
- char path[PATH_MAX];
struct perf_mem_event *e = perf_mem_events__ptr(j);
- struct stat st;
+ struct perf_pmu *pmu;
+ char sysfs_name[100];
+ int unsupported = 0;

/*
* If the event entry isn't valid, skip initialization
@@ -121,11 +136,23 @@ int perf_mem_events__init(void)
if (!e->tag)
continue;

- scnprintf(path, PATH_MAX, "%s/devices/%s",
- mnt, e->sysfs_name);
+ if (!perf_pmu__has_hybrid()) {
+ scnprintf(sysfs_name, sizeof(sysfs_name),
+ e->sysfs_name, "cpu");
+ e->supported = perf_mem_events__supported(mnt, sysfs_name);
+ } else {
+ perf_pmu__for_each_hybrid_pmu(pmu) {
+ scnprintf(sysfs_name, sizeof(sysfs_name),
+ e->sysfs_name, pmu->name);
+ if (!perf_mem_events__supported(mnt, sysfs_name))
+ unsupported++;
+ }
+
+ e->supported = (unsupported == 0) ? true : false;
+ }

- if (!stat(path, &st))
- e->supported = found = true;
+ if (e->supported)
+ found = true;
}

return found ? 0 : -ENOENT;
--
2.17.1