[PATCH v3 3/n] perf/core: address a 4x slowdown during per-process profiling of the STREAM benchmark on Intel Xeon Phi
From: Alexey Budankov
Date: Fri Jun 16 2017 - 08:42:25 EST
perf/core: mux switch to skip to the current CPU's events list on mux interrupt
By default, the userspace perf tool opens per-cpu task-bound events
when sampling, so for N logical events requested by the user, the tool
will open N * NR_CPUS events.
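For illustration (not part of this patch), a minimal userspace sketch of how one logical event turns into NR_CPUS task-bound events; the perf_event_attr setup is abbreviated and error handling is omitted:

    /* Sketch only: open one task-bound event per CPU for a target pid,
     * roughly what the perf tool does when sampling a single process. */
    #include <linux/perf_event.h>
    #include <sys/syscall.h>
    #include <unistd.h>
    #include <string.h>

    static int open_counter_on_cpu(pid_t pid, int cpu)
    {
            struct perf_event_attr attr;

            memset(&attr, 0, sizeof(attr));
            attr.size = sizeof(attr);
            attr.type = PERF_TYPE_HARDWARE;
            attr.config = PERF_COUNT_HW_CPU_CYCLES;
            attr.disabled = 1;

            /* pid > 0 && cpu >= 0: count this task only while it runs on 'cpu' */
            return syscall(__NR_perf_event_open, &attr, pid, cpu, -1, 0);
    }

Repeating this for each of the N requested events is what produces the N * NR_CPUS kernel-side events.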
In the kernel, we mux events with a hrtimer, periodically rotating the
flexible group list and trying to schedule each group in turn. We skip
groups whose cpu filter doesn't match. So when we get unlucky, we can
walk N * (NR_CPUS - 1) groups pointlessly for each hrtimer invocation.
This has been observed to result in significant overhead when running
the STREAM benchmark on 272-core Intel Xeon Phi systems.
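The shape of the per-tick walk being avoided looks roughly like this (a simplified sketch, not the exact pre-patch code):

    /* Simplified sketch of the old rotation path: all N * NR_CPUS groups
     * sit on a single list, so every hrtimer tick visits every group and
     * rejects most of them on the CPU filter alone. */
    list_for_each_entry(event, &ctx->flexible_groups, group_entry) {
            if (event->cpu != -1 && event->cpu != smp_processor_id())
                    continue;       /* up to N * (NR_CPUS - 1) wasted visits */
            group_sched_in(event, cpuctx, ctx);
    }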
One way to avoid this is to place our events into an rb tree sorted by
CPU filter, so that our hrtimer can skip to the current CPU's
list and ignore everything else.
This patch implements a mux switch that, on the multiplexing hrtimer
interrupt, restricts scheduling to the current CPU's list only.
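Concretely, when mux is set (only from perf_rotate_context() below), the sched_in/sched_out paths visit just the "any CPU" (-1) sub-list and the current CPU's sub-list of each tree, along the lines of:

    /* Mux path: touch only two sub-lists of the flexible groups tree
     * instead of iterating every group in the context. */
    int cpu = smp_processor_id();

    perf_cpu_tree_iterate_cpu(&ctx->flexible_groups, -1,
                              group_sched_in_flexible_callback, &params);
    perf_cpu_tree_iterate_cpu(&ctx->flexible_groups, cpu,
                              group_sched_in_flexible_callback, &params);

The rb tree keyed by event CPU and the perf_cpu_tree_iterate*() helpers come from the earlier patches in this series.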
Signed-off-by: Alexey Budankov <alexey.budankov@xxxxxxxxxxxxxxx>
---
kernel/events/core.c | 133 ++++++++++++++++++++++++++++++++++-----------------
1 file changed, 90 insertions(+), 43 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index a131bd5..309c0c3 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -555,11 +555,11 @@ void perf_sample_event_took(u64 sample_len_ns)
static atomic64_t perf_event_id;
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
- enum event_type_t event_type);
+ enum event_type_t event_type, int mux);
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
- struct task_struct *task);
+ struct task_struct *task, int mux);
static void update_context_time(struct perf_event_context *ctx);
static u64 perf_event_time(struct perf_event *event);
@@ -701,6 +701,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
struct perf_cpu_context *cpuctx;
struct list_head *list;
unsigned long flags;
+ int mux = 0;
/*
* Disable interrupts and preemption to avoid this CPU's
@@ -716,7 +717,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
perf_pmu_disable(cpuctx->ctx.pmu);
if (mode & PERF_CGROUP_SWOUT) {
- cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+ cpu_ctx_sched_out(cpuctx, EVENT_ALL, mux);
/*
* must not be done before ctxswout due
* to event_filter_match() in event_sched_out()
@@ -735,7 +736,7 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
*/
cpuctx->cgrp = perf_cgroup_from_task(task,
&cpuctx->ctx);
- cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
+ cpu_ctx_sched_in(cpuctx, EVENT_ALL, task, mux);
}
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -1564,6 +1565,7 @@ perf_cpu_tree_iterate_cpu(struct rb_root *tree, int cpu,
if (ret)
return ret;
}
+ return 0;
}
}
@@ -2373,36 +2375,38 @@ static void add_event_to_ctx(struct perf_event *event,
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
- enum event_type_t event_type);
+ enum event_type_t event_type, int mux);
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
- struct task_struct *task);
+ struct task_struct *task, int mux);
static void task_ctx_sched_out(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
enum event_type_t event_type)
{
+ int mux = 0;
+
if (!cpuctx->task_ctx)
return;
if (WARN_ON_ONCE(ctx != cpuctx->task_ctx))
return;
- ctx_sched_out(ctx, cpuctx, event_type);
+ ctx_sched_out(ctx, cpuctx, event_type, mux);
}
static void perf_event_sched_in(struct perf_cpu_context *cpuctx,
struct perf_event_context *ctx,
- struct task_struct *task)
+ struct task_struct *task, int mux)
{
- cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task);
+ cpu_ctx_sched_in(cpuctx, EVENT_PINNED, task, mux);
if (ctx)
- ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task);
- cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task);
+ ctx_sched_in(ctx, cpuctx, EVENT_PINNED, task, mux);
+ cpu_ctx_sched_in(cpuctx, EVENT_FLEXIBLE, task, mux);
if (ctx)
- ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task);
+ ctx_sched_in(ctx, cpuctx, EVENT_FLEXIBLE, task, mux);
}
/*
@@ -2426,6 +2430,7 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
{
enum event_type_t ctx_event_type = event_type & EVENT_ALL;
bool cpu_event = !!(event_type & EVENT_CPU);
+ int mux = 0;
/*
* If pinned groups are involved, flexible groups also need to be
@@ -2446,11 +2451,11 @@ static void ctx_resched(struct perf_cpu_context *cpuctx,
* - otherwise, do nothing more.
*/
if (cpu_event)
- cpu_ctx_sched_out(cpuctx, ctx_event_type);
+ cpu_ctx_sched_out(cpuctx, ctx_event_type, mux);
else if (ctx_event_type & EVENT_PINNED)
- cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE, mux);
- perf_event_sched_in(cpuctx, task_ctx, current);
+ perf_event_sched_in(cpuctx, task_ctx, current, mux);
perf_pmu_enable(cpuctx->ctx.pmu);
}
@@ -2468,6 +2473,7 @@ static int __perf_install_in_context(void *info)
struct perf_event_context *task_ctx = cpuctx->task_ctx;
bool reprogram = true;
int ret = 0;
+ int mux = 0;
raw_spin_lock(&cpuctx->ctx.lock);
if (ctx->task) {
@@ -2494,7 +2500,7 @@ static int __perf_install_in_context(void *info)
}
if (reprogram) {
- ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME, mux);
add_event_to_ctx(event, ctx);
ctx_resched(cpuctx, task_ctx, get_event_type(event));
} else {
@@ -2630,13 +2636,14 @@ static void __perf_event_enable(struct perf_event *event,
{
struct perf_event *leader = event->group_leader;
struct perf_event_context *task_ctx;
+ int mux = 0;
if (event->state >= PERF_EVENT_STATE_INACTIVE ||
event->state <= PERF_EVENT_STATE_ERROR)
return;
if (ctx->is_active)
- ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME, mux);
__perf_event_mark_enabled(event);
@@ -2646,7 +2653,7 @@ static void __perf_event_enable(struct perf_event *event,
if (!event_filter_match(event)) {
if (is_cgroup_event(event))
perf_cgroup_defer_enabled(event);
- ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+ ctx_sched_in(ctx, cpuctx, EVENT_TIME, current, mux);
return;
}
@@ -2655,7 +2662,7 @@ static void __perf_event_enable(struct perf_event *event,
* then don't put it on unless the group is on.
*/
if (leader != event && leader->state != PERF_EVENT_STATE_ACTIVE) {
- ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+ ctx_sched_in(ctx, cpuctx, EVENT_TIME, current, mux);
return;
}
@@ -2922,13 +2929,14 @@ static int group_sched_out_callback(struct perf_event *event, void *data)
static void ctx_sched_out(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
- enum event_type_t event_type)
+ enum event_type_t event_type, int mux)
{
int is_active = ctx->is_active;
struct group_sched_params params = {
.cpuctx = cpuctx,
.ctx = ctx
};
+ int cpu = smp_processor_id();
lockdep_assert_held(&ctx->lock);
@@ -2975,13 +2983,29 @@ static void ctx_sched_out(struct perf_event_context *ctx,
perf_pmu_disable(ctx->pmu);
- if (is_active & EVENT_PINNED)
- perf_cpu_tree_iterate(&ctx->pinned_groups,
- group_sched_out_callback, &params);
+ if (is_active & EVENT_PINNED) {
+ if (mux) {
+ perf_cpu_tree_iterate_cpu(&ctx->pinned_groups, -1,
+ group_sched_out_callback, &params);
+ perf_cpu_tree_iterate_cpu(&ctx->pinned_groups, cpu,
+ group_sched_out_callback, &params);
+ } else {
+ perf_cpu_tree_iterate(&ctx->pinned_groups,
+ group_sched_out_callback, &params);
+ }
+ }
- if (is_active & EVENT_FLEXIBLE)
- perf_cpu_tree_iterate(&ctx->flexible_groups,
- group_sched_out_callback, &params);
+ if (is_active & EVENT_FLEXIBLE) {
+ if (mux) {
+ perf_cpu_tree_iterate_cpu(&ctx->flexible_groups, -1,
+ group_sched_out_callback, &params);
+ perf_cpu_tree_iterate_cpu(&ctx->flexible_groups, cpu,
+ group_sched_out_callback, &params);
+ } else {
+ perf_cpu_tree_iterate(&ctx->flexible_groups,
+ group_sched_out_callback, &params);
+ }
+ }
perf_pmu_enable(ctx->pmu);
}
@@ -3270,16 +3294,16 @@ void __perf_event_task_sched_out(struct task_struct *task,
* Called with IRQs disabled
*/
static void cpu_ctx_sched_out(struct perf_cpu_context *cpuctx,
- enum event_type_t event_type)
+ enum event_type_t event_type, int mux)
{
- ctx_sched_out(&cpuctx->ctx, cpuctx, event_type);
+ ctx_sched_out(&cpuctx->ctx, cpuctx, event_type, mux);
}
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
- struct task_struct *task)
+ struct task_struct *task, int mux)
{
int is_active = ctx->is_active;
struct group_sched_params params = {
@@ -3288,6 +3312,7 @@ ctx_sched_in(struct perf_event_context *ctx,
.can_add_hw = 1
};
+ int cpu = smp_processor_id();
lockdep_assert_held(&ctx->lock);
@@ -3314,29 +3339,49 @@ ctx_sched_in(struct perf_event_context *ctx,
* First go through the list and put on any pinned groups
* in order to give them the best chance of going on.
*/
- if (is_active & EVENT_PINNED)
- perf_cpu_tree_iterate(&ctx->pinned_groups,
- group_sched_in_pinned_callback, &params);
+ if (is_active & EVENT_PINNED) {
+ if (mux) {
+ perf_cpu_tree_iterate_cpu(&ctx->pinned_groups, -1,
+ group_sched_in_pinned_callback,
+ &params);
+ perf_cpu_tree_iterate_cpu(&ctx->pinned_groups, cpu,
+ group_sched_in_pinned_callback,
+ &params);
+ } else {
+ perf_cpu_tree_iterate(&ctx->pinned_groups,
+ group_sched_in_pinned_callback,
+ &params);
+ }
+ }
/* Then walk through the lower prio flexible groups */
- if (is_active & EVENT_FLEXIBLE)
- perf_cpu_tree_iterate(&ctx->flexible_groups,
+ if (is_active & EVENT_FLEXIBLE) {
+ if (mux) {
+ perf_cpu_tree_iterate_cpu(&ctx->flexible_groups, -1,
+ group_sched_in_flexible_callback, &params);
+ perf_cpu_tree_iterate_cpu(&ctx->flexible_groups, cpu,
group_sched_in_flexible_callback, &params);
+ } else {
+ perf_cpu_tree_iterate(&ctx->flexible_groups,
+ group_sched_in_flexible_callback, &params);
+ }
+ }
}
static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
enum event_type_t event_type,
- struct task_struct *task)
+ struct task_struct *task, int mux)
{
struct perf_event_context *ctx = &cpuctx->ctx;
- ctx_sched_in(ctx, cpuctx, event_type, task);
+ ctx_sched_in(ctx, cpuctx, event_type, task, mux);
}
static void perf_event_context_sched_in(struct perf_event_context *ctx,
struct task_struct *task)
{
struct perf_cpu_context *cpuctx;
+ int mux = 0;
cpuctx = __get_cpu_context(ctx);
if (cpuctx->task_ctx == ctx)
@@ -3353,8 +3398,8 @@ static void perf_event_context_sched_in(struct perf_event_context *ctx,
* events, no need to flip the cpuctx's events around.
*/
if (!RB_EMPTY_ROOT(&ctx->pinned_groups))
- cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
- perf_event_sched_in(cpuctx, ctx, task);
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE, mux);
+ perf_event_sched_in(cpuctx, ctx, task, mux);
perf_pmu_enable(ctx->pmu);
perf_ctx_unlock(cpuctx, ctx);
}
@@ -3606,6 +3651,7 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
{
struct perf_event_context *ctx = NULL;
int rotate = 0;
+ int mux = 1;
if (cpuctx->ctx.nr_events) {
if (cpuctx->ctx.nr_events != cpuctx->ctx.nr_active)
@@ -3624,15 +3670,15 @@ static int perf_rotate_context(struct perf_cpu_context *cpuctx)
perf_ctx_lock(cpuctx, cpuctx->task_ctx);
perf_pmu_disable(cpuctx->ctx.pmu);
- cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE);
+ cpu_ctx_sched_out(cpuctx, EVENT_FLEXIBLE, mux);
if (ctx)
- ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE);
+ ctx_sched_out(ctx, cpuctx, EVENT_FLEXIBLE, mux);
rotate_ctx(&cpuctx->ctx);
if (ctx)
rotate_ctx(ctx);
- perf_event_sched_in(cpuctx, ctx, current);
+ perf_event_sched_in(cpuctx, ctx, current, mux);
perf_pmu_enable(cpuctx->ctx.pmu);
perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
@@ -3684,6 +3730,7 @@ static void perf_event_enable_on_exec(int ctxn)
struct perf_event *event;
unsigned long flags;
int enabled = 0;
+ int mux = 0;
local_irq_save(flags);
ctx = current->perf_event_ctxp[ctxn];
@@ -3692,7 +3739,7 @@ static void perf_event_enable_on_exec(int ctxn)
cpuctx = __get_cpu_context(ctx);
perf_ctx_lock(cpuctx, ctx);
- ctx_sched_out(ctx, cpuctx, EVENT_TIME);
+ ctx_sched_out(ctx, cpuctx, EVENT_TIME, mux);
list_for_each_entry(event, &ctx->event_list, event_entry) {
enabled |= event_enable_on_exec(event, ctx);
event_type |= get_event_type(event);
@@ -3705,7 +3752,7 @@ static void perf_event_enable_on_exec(int ctxn)
clone_ctx = unclone_ctx(ctx);
ctx_resched(cpuctx, ctx, event_type);
} else {
- ctx_sched_in(ctx, cpuctx, EVENT_TIME, current);
+ ctx_sched_in(ctx, cpuctx, EVENT_TIME, current, mux);
}
perf_ctx_unlock(cpuctx, ctx);