[PATCH v0 2/5] perf: Extend perf_event_aux() to optionally iterate through more events
From: Alexander Shishkin
Date: Fri Dec 11 2015 - 08:39:24 EST
Trace filtering code needs an iterator that can go through all events,
including inactive and filtered, to be able to update their filters'
ranges based on mmap or exec events.
This patch adds a boolean 'all' parameter to perf_event_aux() (and its
helpers) so that callers can optionally iterate over all events,
bypassing the state and filter-match checks.
Signed-off-by: Alexander Shishkin <alexander.shishkin@xxxxxxxxxxxxxxx>
---
kernel/events/core.c | 38 +++++++++++++++++++++-----------------
1 file changed, 21 insertions(+), 17 deletions(-)
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 0b28116dd7..2bab4af901 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -5642,33 +5642,36 @@ typedef void (perf_event_aux_output_cb)(struct perf_event *event, void *data);
static void
perf_event_aux_ctx(struct perf_event_context *ctx,
perf_event_aux_output_cb output,
- void *data)
+ void *data, bool all)
{
struct perf_event *event;
list_for_each_entry_rcu(event, &ctx->event_list, event_entry) {
- if (event->state < PERF_EVENT_STATE_INACTIVE)
- continue;
- if (!event_filter_match(event))
- continue;
+ if (!all) {
+ if (event->state < PERF_EVENT_STATE_INACTIVE)
+ continue;
+ if (!event_filter_match(event))
+ continue;
+ }
+
output(event, data);
}
}
static void
perf_event_aux_task_ctx(perf_event_aux_output_cb output, void *data,
- struct perf_event_context *task_ctx)
+ struct perf_event_context *task_ctx, bool all)
{
rcu_read_lock();
preempt_disable();
- perf_event_aux_ctx(task_ctx, output, data);
+ perf_event_aux_ctx(task_ctx, output, data, all);
preempt_enable();
rcu_read_unlock();
}
static void
perf_event_aux(perf_event_aux_output_cb output, void *data,
- struct perf_event_context *task_ctx)
+ struct perf_event_context *task_ctx, bool all)
{
struct perf_cpu_context *cpuctx;
struct perf_event_context *ctx;
@@ -5682,7 +5685,7 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
* context.
*/
if (task_ctx) {
- perf_event_aux_task_ctx(output, data, task_ctx);
+ perf_event_aux_task_ctx(output, data, task_ctx, all);
return;
}
@@ -5691,13 +5694,13 @@ perf_event_aux(perf_event_aux_output_cb output, void *data,
cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
if (cpuctx->unique_pmu != pmu)
goto next;
- perf_event_aux_ctx(&cpuctx->ctx, output, data);
+ perf_event_aux_ctx(&cpuctx->ctx, output, data, all);
ctxn = pmu->task_ctx_nr;
if (ctxn < 0)
goto next;
ctx = rcu_dereference(current->perf_event_ctxp[ctxn]);
if (ctx)
- perf_event_aux_ctx(ctx, output, data);
+ perf_event_aux_ctx(ctx, output, data, all);
next:
put_cpu_ptr(pmu->pmu_cpu_context);
}
@@ -5725,10 +5728,11 @@ static int __perf_pmu_output_stop(void *info)
struct perf_cpu_context *cpuctx = get_cpu_ptr(pmu->pmu_cpu_context);
rcu_read_lock();
- perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, event->rb);
+ perf_event_aux_ctx(&cpuctx->ctx, __perf_event_output_stop, event->rb,
+ false);
if (cpuctx->task_ctx)
perf_event_aux_ctx(cpuctx->task_ctx, __perf_event_output_stop,
- event->rb);
+ event->rb, false);
rcu_read_unlock();
return 0;
@@ -5840,7 +5844,7 @@ static void perf_event_task(struct task_struct *task,
perf_event_aux(perf_event_task_output,
&task_event,
- task_ctx);
+ task_ctx, false);
}
void perf_event_fork(struct task_struct *task)
@@ -5919,7 +5923,7 @@ static void perf_event_comm_event(struct perf_comm_event *comm_event)
perf_event_aux(perf_event_comm_output,
comm_event,
- NULL);
+ NULL, false);
}
void perf_event_comm(struct task_struct *task, bool exec)
@@ -6150,7 +6154,7 @@ got_name:
perf_event_aux(perf_event_mmap_output,
mmap_event,
- NULL);
+ NULL, false);
kfree(buf);
}
@@ -6338,7 +6342,7 @@ static void perf_event_switch(struct task_struct *task,
perf_event_aux(perf_event_switch_output,
&switch_event,
- NULL);
+ NULL, false);
}
/*
--
2.6.2
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/