[PATCH 5.3 146/193] perf/x86/uncore: Fix event group support
From: Greg Kroah-Hartman
Date: Mon Nov 11 2019 - 13:55:44 EST
From: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
[ Upstream commit 75be6f703a141b048590d659a3954c4fedd30bba ]
The events in the same group don't start or stop simultaneously.
Here is the ftrace output when enabling an event group for uncore_iio_0:
# perf stat -e "{uncore_iio_0/event=0x1/,uncore_iio_0/event=0xe/}"
<idle>-0 [000] d.h. 8959.064832: read_msr: a41, value b2b0b030
    // Read counter reg of IIO unit0 counter0
<idle>-0 [000] d.h. 8959.064835: write_msr: a48, value 400001
    // Write Ctrl reg of IIO unit0 counter0 to enable counter0.
    // <------ Although counter0 is enabled, the Unit Ctrl is still
    // frozen, so nothing counts yet. We are still good here.
<idle>-0 [000] d.h. 8959.064836: read_msr: a40, value 30100
    // Read Unit Ctrl reg of IIO unit0
<idle>-0 [000] d.h. 8959.064838: write_msr: a40, value 30000
    // Write Unit Ctrl reg of IIO unit0 to enable all counters in the
    // unit by clearing the Freeze bit. <------ Unit0 is un-frozen and
    // counter0 starts counting, but counter1 has not been enabled yet.
    // The issue starts here.
<idle>-0 [000] d.h. 8959.064846: read_msr: a42, value 0
    // Read counter reg of IIO unit0 counter1
<idle>-0 [000] d.h. 8959.064847: write_msr: a49, value 40000e
    // Write Ctrl reg of IIO unit0 counter1 to enable counter1.
    // <------ Only now does counter1 start counting, while counter0 has
    // already been running for a while.
The current code un-freezes the Unit Ctrl right after the first counter is
enabled, so the subsequent events in a group always lose some counter values.
Implement pmu_enable and pmu_disable support for uncore, which lets the
perf core batch the hardware accesses: the box stays frozen while the
individual counters of a group are programmed and is only un-frozen once
all of them have been enabled. See the sketch below.
uncore_enable_box() and uncore_disable_box() no longer have any users, so
remove them.
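For context, a rough sketch of the sequence the generic perf core performs
when it schedules the group once the .pmu_enable/.pmu_disable callbacks are
wired up. sched_in_group_sketch() is an illustrative helper name, not the
actual core code; only the struct pmu callbacks correspond to this patch:

  #include <linux/perf_event.h>

  /*
   * Simplified, illustrative view of group scheduling with the new
   * callbacks in place; the real work happens in kernel/events/core.c.
   */
  static void sched_in_group_sketch(struct pmu *pmu, struct perf_event *leader)
  {
          struct perf_event *sibling;

          /* uncore_pmu_disable(): freeze the box (set the Freeze bit) */
          pmu->pmu_disable(pmu);

          /* program and start every counter while the box is still frozen */
          pmu->add(leader, PERF_EF_START);
          for_each_sibling_event(sibling, leader)
                  pmu->add(sibling, PERF_EF_START);

          /* uncore_pmu_enable(): un-freeze the box; all counters start together */
          pmu->pmu_enable(pmu);
  }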
Signed-off-by: Kan Liang <kan.liang@xxxxxxxxxxxxxxx>
Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Alexander Shishkin <alexander.shishkin@xxxxxxxxxxxxxxx>
Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
Cc: Jiri Olsa <jolsa@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Mark Rutland <mark.rutland@xxxxxxx>
Cc: Namhyung Kim <namhyung@xxxxxxxxxx>
Cc: Stephane Eranian <eranian@xxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Vince Weaver <vincent.weaver@xxxxxxxxx>
Cc: linux-drivers-review@xxxxxxxxxxxxxxxxx
Cc: linux-perf@xxxxxxxxxxxxxxxxx
Fixes: 087bfbb03269 ("perf/x86: Add generic Intel uncore PMU support")
Link: https://lkml.kernel.org/r/1572014593-31591-1-git-send-email-kan.liang@xxxxxxxxxxxxxxx
Signed-off-by: Ingo Molnar <mingo@xxxxxxxxxx>
Signed-off-by: Sasha Levin <sashal@xxxxxxxxxx>
---
arch/x86/events/intel/uncore.c | 44 +++++++++++++++++++++++++++++-----
arch/x86/events/intel/uncore.h | 12 ----------
2 files changed, 38 insertions(+), 18 deletions(-)
diff --git a/arch/x86/events/intel/uncore.c b/arch/x86/events/intel/uncore.c
index 3694a5d0703d9..f7b191d3c9b01 100644
--- a/arch/x86/events/intel/uncore.c
+++ b/arch/x86/events/intel/uncore.c
@@ -502,10 +502,8 @@ void uncore_pmu_event_start(struct perf_event *event, int flags)
local64_set(&event->hw.prev_count, uncore_read_counter(box, event));
uncore_enable_event(box, event);
- if (box->n_active == 1) {
- uncore_enable_box(box);
+ if (box->n_active == 1)
uncore_pmu_start_hrtimer(box);
- }
}
void uncore_pmu_event_stop(struct perf_event *event, int flags)
@@ -529,10 +527,8 @@ void uncore_pmu_event_stop(struct perf_event *event, int flags)
WARN_ON_ONCE(hwc->state & PERF_HES_STOPPED);
hwc->state |= PERF_HES_STOPPED;
- if (box->n_active == 0) {
- uncore_disable_box(box);
+ if (box->n_active == 0)
uncore_pmu_cancel_hrtimer(box);
- }
}
if ((flags & PERF_EF_UPDATE) && !(hwc->state & PERF_HES_UPTODATE)) {
@@ -778,6 +774,40 @@ static int uncore_pmu_event_init(struct perf_event *event)
return ret;
}
+static void uncore_pmu_enable(struct pmu *pmu)
+{
+ struct intel_uncore_pmu *uncore_pmu;
+ struct intel_uncore_box *box;
+
+ uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+ if (!uncore_pmu)
+ return;
+
+ box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+ if (!box)
+ return;
+
+ if (uncore_pmu->type->ops->enable_box)
+ uncore_pmu->type->ops->enable_box(box);
+}
+
+static void uncore_pmu_disable(struct pmu *pmu)
+{
+ struct intel_uncore_pmu *uncore_pmu;
+ struct intel_uncore_box *box;
+
+ uncore_pmu = container_of(pmu, struct intel_uncore_pmu, pmu);
+ if (!uncore_pmu)
+ return;
+
+ box = uncore_pmu_to_box(uncore_pmu, smp_processor_id());
+ if (!box)
+ return;
+
+ if (uncore_pmu->type->ops->disable_box)
+ uncore_pmu->type->ops->disable_box(box);
+}
+
static ssize_t uncore_get_attr_cpumask(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -803,6 +833,8 @@ static int uncore_pmu_register(struct intel_uncore_pmu *pmu)
pmu->pmu = (struct pmu) {
.attr_groups = pmu->type->attr_groups,
.task_ctx_nr = perf_invalid_context,
+ .pmu_enable = uncore_pmu_enable,
+ .pmu_disable = uncore_pmu_disable,
.event_init = uncore_pmu_event_init,
.add = uncore_pmu_event_add,
.del = uncore_pmu_event_del,
diff --git a/arch/x86/events/intel/uncore.h b/arch/x86/events/intel/uncore.h
index f36f7bebbc1bb..bbfdaa720b456 100644
--- a/arch/x86/events/intel/uncore.h
+++ b/arch/x86/events/intel/uncore.h
@@ -441,18 +441,6 @@ static inline int uncore_freerunning_hw_config(struct intel_uncore_box *box,
return -EINVAL;
}
-static inline void uncore_disable_box(struct intel_uncore_box *box)
-{
- if (box->pmu->type->ops->disable_box)
- box->pmu->type->ops->disable_box(box);
-}
-
-static inline void uncore_enable_box(struct intel_uncore_box *box)
-{
- if (box->pmu->type->ops->enable_box)
- box->pmu->type->ops->enable_box(box);
-}
-
static inline void uncore_disable_event(struct intel_uncore_box *box,
struct perf_event *event)
{
--
2.20.1