[PATCH v3 07/10] perf: simplify and rename visit_groups_merge

From: Ian Rogers
Date: Wed Nov 13 2019 - 19:31:21 EST


To enable a future caching optimization, pass in whether
visit_groups_merge is operating on pinned or flexible groups. The
is_pinned argument makes the func argument redundant; rename the
function to ctx_groups_sched_in, as it just schedules pinned or flexible
groups in. Compute the cpu and groups arguments locally to reduce the
argument list size. Remove sched_in_data as it repeats arguments already
passed in. Remove the unused data argument to pinned_sched_in.

Signed-off-by: Ian Rogers <irogers@xxxxxxxxxx>
---
kernel/events/core.c | 106 +++++++++++++++++--------------------------
1 file changed, 41 insertions(+), 65 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index cb5fc47611c7..11594d8bbb2e 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -3509,10 +3509,18 @@ static void __heap_add(struct min_max_heap *heap, struct perf_event *event)
}
}

-static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
- struct perf_event_groups *groups, int cpu,
- int (*func)(struct perf_event *, void *),
- void *data)
+static int pinned_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ struct perf_event *event);
+
+static int flexible_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ struct perf_event *event,
+ int *can_add_hw);
+
+static int ctx_groups_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ bool is_pinned)
{
#ifdef CONFIG_CGROUP_PERF
struct cgroup_subsys_state *css = NULL;
@@ -3522,9 +3530,13 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
struct min_max_heap event_heap;
struct perf_event **evt;
struct perf_event *next;
- int ret;
+ int ret, can_add_hw = 1;
+ int cpu = smp_processor_id();
+ struct perf_event_groups *groups = is_pinned
+ ? &ctx->pinned_groups
+ : &ctx->flexible_groups;

- if (cpuctx) {
+ if (ctx == &cpuctx->ctx) {
event_heap = (struct min_max_heap){
.data = cpuctx->itr_storage,
.size = 0,
@@ -3562,7 +3574,11 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
heapify_all(&event_heap, &perf_min_heap);

while (event_heap.size) {
- ret = func(*evt, data);
+ if (is_pinned)
+ ret = pinned_sched_in(ctx, cpuctx, *evt);
+ else
+ ret = flexible_sched_in(ctx, cpuctx, *evt, &can_add_hw);
+
if (ret)
return ret;

@@ -3576,25 +3592,19 @@ static noinline int visit_groups_merge(struct perf_cpu_context *cpuctx,
return 0;
}

-struct sched_in_data {
- struct perf_event_context *ctx;
- struct perf_cpu_context *cpuctx;
- int can_add_hw;
-};
-
-static int pinned_sched_in(struct perf_event *event, void *data)
+static int pinned_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ struct perf_event *event)
{
- struct sched_in_data *sid = data;
-
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;

if (!event_filter_match(event))
return 0;

- if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
- if (!group_sched_in(event, sid->cpuctx, sid->ctx))
- list_add_tail(&event->active_list, &sid->ctx->pinned_active);
+ if (group_can_go_on(event, cpuctx, 1)) {
+ if (!group_sched_in(event, cpuctx, ctx))
+ list_add_tail(&event->active_list, &ctx->pinned_active);
}

/*
@@ -3607,65 +3617,30 @@ static int pinned_sched_in(struct perf_event *event, void *data)
return 0;
}

-static int flexible_sched_in(struct perf_event *event, void *data)
+static int flexible_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx,
+ struct perf_event *event,
+ int *can_add_hw)
{
- struct sched_in_data *sid = data;
-
if (event->state <= PERF_EVENT_STATE_OFF)
return 0;

if (!event_filter_match(event))
return 0;

- if (group_can_go_on(event, sid->cpuctx, sid->can_add_hw)) {
- int ret = group_sched_in(event, sid->cpuctx, sid->ctx);
+ if (group_can_go_on(event, cpuctx, *can_add_hw)) {
+ int ret = group_sched_in(event, cpuctx, ctx);
if (ret) {
- sid->can_add_hw = 0;
- sid->ctx->rotate_necessary = 1;
+ *can_add_hw = 0;
+ ctx->rotate_necessary = 1;
return 0;
}
- list_add_tail(&event->active_list, &sid->ctx->flexible_active);
+ list_add_tail(&event->active_list, &ctx->flexible_active);
}

return 0;
}

-static void
-ctx_pinned_sched_in(struct perf_event_context *ctx,
- struct perf_cpu_context *cpuctx)
-{
- struct sched_in_data sid = {
- .ctx = ctx,
- .cpuctx = cpuctx,
- .can_add_hw = 1,
- };
-
- if (ctx != &cpuctx->ctx)
- cpuctx = NULL;
-
- visit_groups_merge(cpuctx, &ctx->pinned_groups,
- smp_processor_id(),
- pinned_sched_in, &sid);
-}
-
-static void
-ctx_flexible_sched_in(struct perf_event_context *ctx,
- struct perf_cpu_context *cpuctx)
-{
- struct sched_in_data sid = {
- .ctx = ctx,
- .cpuctx = cpuctx,
- .can_add_hw = 1,
- };
-
- if (ctx != &cpuctx->ctx)
- cpuctx = NULL;
-
- visit_groups_merge(cpuctx, &ctx->flexible_groups,
- smp_processor_id(),
- flexible_sched_in, &sid);
-}
-
static void
ctx_sched_in(struct perf_event_context *ctx,
struct perf_cpu_context *cpuctx,
@@ -3702,11 +3677,12 @@ ctx_sched_in(struct perf_event_context *ctx,
* in order to give them the best chance of going on.
*/
if (is_active & EVENT_PINNED)
- ctx_pinned_sched_in(ctx, cpuctx);
+ ctx_groups_sched_in(ctx, cpuctx, /*is_pinned=*/true);
+

/* Then walk through the lower prio flexible groups */
if (is_active & EVENT_FLEXIBLE)
- ctx_flexible_sched_in(ctx, cpuctx);
+ ctx_groups_sched_in(ctx, cpuctx, /*is_pinned=*/false);
}

static void cpu_ctx_sched_in(struct perf_cpu_context *cpuctx,
--
2.24.0.432.g9d3f5f5b63-goog