[RFC PATCH 3/4] perf/core: Split up pinned and non pinned processing

From: Frederic Weisbecker
Date: Sun Nov 08 2009 - 15:13:48 EST


Split up pinned and non-pinned event processing into two helpers
so that it's more flexible to handle them separately.

Signed-off-by: Frederic Weisbecker <fweisbec@xxxxxxxxx>
Cc: Peter Zijlstra <peterz@xxxxxxxxxxxxx>
Cc: Arnaldo Carvalho de Melo <acme@xxxxxxxxxx>
Cc: Mike Galbraith <efault@xxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
---
kernel/perf_event.c | 51 +++++++++++++++++++++++++++++++++++----------------
1 file changed, 35 insertions(+), 16 deletions(-)

diff --git a/kernel/perf_event.c b/kernel/perf_event.c
index 0432c1c..50f2997 100644
--- a/kernel/perf_event.c
+++ b/kernel/perf_event.c
@@ -1246,25 +1246,11 @@ static void perf_event_cpu_sched_out(struct perf_cpu_context *cpuctx)
}

static void
-__perf_event_sched_in(struct perf_event_context *ctx,
- struct perf_cpu_context *cpuctx, int cpu)
+__perf_event_sched_in_pinned(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx, int cpu)
{
struct perf_event *event;
- int can_add_hw = 1;
-
- spin_lock(&ctx->lock);
- ctx->is_active = 1;
- if (likely(!ctx->nr_events))
- goto out;
-
- ctx->timestamp = perf_clock();
-
- perf_disable();

- /*
- * First go through the list and put on any pinned groups
- * in order to give them the best chance of going on.
- */
list_for_each_entry(event, &ctx->pinned_grp_list, group_entry) {
if (event->state <= PERF_EVENT_STATE_OFF)
continue;
@@ -1283,6 +1269,14 @@ __perf_event_sched_in(struct perf_event_context *ctx,
event->state = PERF_EVENT_STATE_ERROR;
}
}
+}
+
+static void
+__perf_event_sched_in_volatile(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx, int cpu)
+{
+ int can_add_hw = 1;
+ struct perf_event *event;

list_for_each_entry(event, &ctx->volatile_grp_list, group_entry) {
/*
@@ -1303,6 +1297,31 @@ __perf_event_sched_in(struct perf_event_context *ctx,
if (group_sched_in(event, cpuctx, ctx, cpu))
can_add_hw = 0;
}
+}
+
+static void
+__perf_event_sched_in(struct perf_event_context *ctx,
+ struct perf_cpu_context *cpuctx, int cpu)
+{
+ spin_lock(&ctx->lock);
+ ctx->is_active = 1;
+ if (likely(!ctx->nr_events))
+ goto out;
+
+ ctx->timestamp = perf_clock();
+
+ perf_disable();
+
+ /*
+ * First go through the list and put on any pinned groups
+ * in order to give them the best chance of going on.
+ */
+ __perf_event_sched_in_pinned(ctx, cpuctx, cpu);
+
+ /* Then handle the non-pinned groups */
+ __perf_event_sched_in_volatile(ctx, cpuctx, cpu);
+
+
perf_enable();
out:
spin_unlock(&ctx->lock);
--
1.6.2.3

--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/