[PATCH v4 4/4] perf/core,x86: synchronize PMU task contexts on optimized context switches
From: Alexey Budankov
Date: Tue Oct 22 2019 - 02:01:24 EST
Install an Intel specific PMU task context synchronization adapter and
extend the optimized context switch path with PMU specific task context
synchronization to fix LBR callstack virtualization on context switches.
Signed-off-by: Alexey Budankov <alexey.budankov@xxxxxxxxxxxxxxx>
---
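Note for reviewers reading this patch out of series order: the .sync_task_ctx
member initialized below is the hook introduced by the earlier patches of this
series, and intel_pmu_lbr_sync_task_ctx() comes from the earlier LBR patch.
Judging only from how this patch uses the hook, its x86 side has roughly the
shape sketched here; the struct name is a placeholder and the comment wording
is mine, see the actual series for the real declaration:

struct x86_perf_task_context;

struct x86_pmu_hook_sketch {		/* placeholder, not the real struct x86_pmu */
	/*
	 * Synchronize PMU specific parts of two equivalent task contexts
	 * after the core has swapped their task_ctx_data pointers on an
	 * optimized context switch; optional, NULL when the PMU keeps no
	 * such per-task state.
	 */
	void (*sync_task_ctx)(struct x86_perf_task_context *one,
			      struct x86_perf_task_context *another);
};

On the generic side task_ctx_data is a void *, so the struct pmu counterpart
called from kernel/events/core.c below presumably takes untyped pointers, with
the x86 glue from earlier in the series bridging to the typed Intel handler
installed here.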
 arch/x86/events/intel/core.c |  7 +++++++
 kernel/events/core.c         | 13 +++++++++++++
 2 files changed, 20 insertions(+)
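The comment added to kernel/events/core.c below notes that the PMU specific
synchronization makes sense only when both contexts actually carry
task_ctx_data. The following stand-alone user-space model (every identifier in
it is a made-up stand-in; only the swap-then-sync ordering and the
both-pointers-allocated check mirror the hunk) illustrates the intended flow
on an optimized context switch:

/* Stand-alone model; build with: cc -Wall -o ctx-model ctx-model.c */
#include <stdio.h>

struct task_ctx_data {			/* stand-in for the PMU task context */
	int callstack_users;		/* models per-context LBR state */
};

struct model_ctx {			/* stand-in for perf_event_context */
	struct task_ctx_data *task_ctx_data;
};

/*
 * Stand-in for the sync_task_ctx hook: move the per-context state back to
 * the context it belongs to after the data pointers have been swapped.
 */
static void model_sync_task_ctx(struct task_ctx_data *one,
				struct task_ctx_data *another)
{
	int tmp = one->callstack_users;

	one->callstack_users = another->callstack_users;
	another->callstack_users = tmp;
}

int main(void)
{
	struct task_ctx_data prev_data = { .callstack_users = 1 };
	struct task_ctx_data next_data = { .callstack_users = 0 };
	struct model_ctx ctx      = { .task_ctx_data = &prev_data };
	struct model_ctx next_ctx = { .task_ctx_data = &next_data };
	struct task_ctx_data *tmp;

	/* the optimized context switch swaps the task_ctx_data pointers ... */
	tmp = ctx.task_ctx_data;
	ctx.task_ctx_data = next_ctx.task_ctx_data;
	next_ctx.task_ctx_data = tmp;

	/* ... and PMU specific sync only makes sense if both are allocated */
	if (ctx.task_ctx_data && next_ctx.task_ctx_data)
		model_sync_task_ctx(next_ctx.task_ctx_data,
				    ctx.task_ctx_data);

	printf("ctx users: %d, next_ctx users: %d\n",
	       ctx.task_ctx_data->callstack_users,
	       next_ctx.task_ctx_data->callstack_users);
	return 0;
}

It prints "ctx users: 1, next_ctx users: 0": after the pointer swap plus the
sync step, the per-context counter still belongs to the context it started in
while the data buffer itself has followed the task, which is the split the LBR
callstack code relies on (see the LBR patch earlier in this series for what is
actually synchronized).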
diff --git a/arch/x86/events/intel/core.c b/arch/x86/events/intel/core.c
index bbf6588d47ee..b9f518aa478e 100644
--- a/arch/x86/events/intel/core.c
+++ b/arch/x86/events/intel/core.c
@@ -3820,6 +3820,12 @@ static void intel_pmu_sched_task(struct perf_event_context *ctx,
 	intel_pmu_lbr_sched_task(ctx, sched_in);
 }
 
+static void intel_pmu_sync_task_ctx(struct x86_perf_task_context *one,
+				    struct x86_perf_task_context *another)
+{
+	intel_pmu_lbr_sync_task_ctx(one, another);
+}
+
 static int intel_pmu_check_period(struct perf_event *event, u64 value)
 {
 	return intel_pmu_has_bts_period(event, value) ? -EINVAL : 0;
@@ -3955,6 +3961,7 @@ static __initconst const struct x86_pmu intel_pmu = {
 	.guest_get_msrs		= intel_guest_get_msrs,
 	.sched_task		= intel_pmu_sched_task,
+	.sync_task_ctx		= intel_pmu_sync_task_ctx,
 	.check_period		= intel_pmu_check_period,
diff --git a/kernel/events/core.c b/kernel/events/core.c
index f9a5d4356562..51d4138b06f7 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3204,11 +3204,24 @@ static void perf_event_context_sched_out(struct task_struct *task, int ctxn,
 		raw_spin_lock(&ctx->lock);
 		raw_spin_lock_nested(&next_ctx->lock, SINGLE_DEPTH_NESTING);
 		if (context_equiv(ctx, next_ctx)) {
+			struct pmu *pmu = ctx->pmu;
+
 			WRITE_ONCE(ctx->task, next);
 			WRITE_ONCE(next_ctx->task, task);
 			swap(ctx->task_ctx_data, next_ctx->task_ctx_data);
+			/*
+			 * PMU specific parts of task perf context can require
+			 * additional synchronization, which makes sense only if
+			 * both next_ctx->task_ctx_data and ctx->task_ctx_data
+			 * pointers are allocated. As an example of such
+			 * synchronization see implementation details of Intel
+			 * LBR call stack data profiling.
+			 */
+			if (ctx->task_ctx_data && next_ctx->task_ctx_data)
+				pmu->sync_task_ctx(next_ctx->task_ctx_data,
+						   ctx->task_ctx_data);
 			/*
 			 * RCU_INIT_POINTER here is safe because we've not
 			 * modified the ctx and the above modification of
--
2.20.1