[PATCH] perf: Synchronously cleanup child events
From: Alexander Shishkin
Date: Fri Jan 15 2016 - 09:08:32 EST
The orphan cleanup workqueue doesn't always catch orphans, for example,
if they never schedule after they are orphaned. IOW, the event leak is
still very real. It also wouldn't work for kernel counters.
Also, there seems to be no reason not to carry out this cleanup
procedure synchronously during the parent event's destruction.
This patch replaces the workqueue approach with a simple cleanup round
in the event's destruction path. To avoid racing with clone, we still
check that the parent event has an owner in the inheritance path.
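(The inheritance-path check itself is not quoted in the hunks below. The idea
is simply that inherit_event() refuses to clone from a parent that has already
lost its owner; a rough sketch only, assuming it sits after the child event has
been allocated and before it is wired up:

        /* sketch: don't clone from a parent that already lost its owner */
        if (is_orphaned_event(parent_event)) {
                free_event(child_event);
                return NULL;
        }

where is_orphaned_event() is reduced to the plain owner check in the first
hunk of this patch.)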
Signed-off-by: Alexander Shishkin <alexander.shishkin@xxxxxxxxxxxxxxx>
---
include/linux/perf_event.h |   3 --
kernel/events/core.c       | 121 ++++++++++++++++-----------------------------
2 files changed, 43 insertions(+), 81 deletions(-)
diff --git a/include/linux/perf_event.h b/include/linux/perf_event.h
index 6612732d8f..cd9c1ace29 100644
--- a/include/linux/perf_event.h
+++ b/include/linux/perf_event.h
@@ -634,9 +634,6 @@ struct perf_event_context {
int nr_cgroups; /* cgroup evts */
void *task_ctx_data; /* pmu specific data */
struct rcu_head rcu_head;
-
- struct delayed_work orphans_remove;
- bool orphans_remove_sched;
};
/*
diff --git a/kernel/events/core.c b/kernel/events/core.c
index 630f53acce..8eb3fee429 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -49,8 +49,6 @@
#include <asm/irq_regs.h>
-static struct workqueue_struct *perf_wq;
-
typedef int (*remote_function_f)(void *);
struct remote_function_call {
@@ -1652,40 +1650,9 @@ out:
*/
static bool is_orphaned_event(struct perf_event *event)
{
- return event && !is_kernel_event(event) && !event->owner;
-}
-
-/*
- * Event has a parent but parent's task finished and it's
- * alive only because of children holding refference.
- */
-static bool is_orphaned_child(struct perf_event *event)
-{
- return is_orphaned_event(event->parent);
-}
-
-static void orphans_remove_work(struct work_struct *work);
-
-static void schedule_orphans_remove(struct perf_event_context *ctx)
-{
- if (!ctx->task || ctx->orphans_remove_sched || !perf_wq)
- return;
-
- if (queue_delayed_work(perf_wq, &ctx->orphans_remove, 1)) {
- get_ctx(ctx);
- ctx->orphans_remove_sched = true;
- }
-}
-
-static int __init perf_workqueue_init(void)
-{
- perf_wq = create_singlethread_workqueue("perf");
- WARN(!perf_wq, "failed to create perf workqueue\n");
- return perf_wq ? 0 : -1;
+ return event && !event->owner;
}
-core_initcall(perf_workqueue_init);
-
static inline int pmu_filter_match(struct perf_event *event)
{
struct pmu *pmu = event->pmu;
@@ -1746,9 +1713,6 @@ event_sched_out(struct perf_event *event,
if (event->attr.exclusive || !cpuctx->active_oncpu)
cpuctx->exclusive = 0;
- if (is_orphaned_child(event))
- schedule_orphans_remove(ctx);
-
perf_pmu_enable(event->pmu);
}
@@ -1991,9 +1955,6 @@ event_sched_in(struct perf_event *event,
if (event->attr.exclusive)
cpuctx->exclusive = 1;
- if (is_orphaned_child(event))
- schedule_orphans_remove(ctx);
-
out:
perf_pmu_enable(event->pmu);
@@ -3370,7 +3331,6 @@ static void __perf_event_init_context(struct perf_event_context *ctx)
INIT_LIST_HEAD(&ctx->flexible_groups);
INIT_LIST_HEAD(&ctx->event_list);
atomic_set(&ctx->refcount, 1);
- INIT_DELAYED_WORK(&ctx->orphans_remove, orphans_remove_work);
}
static struct perf_event_context *
@@ -3818,54 +3778,59 @@ static void put_event(struct perf_event *event)
int perf_event_release_kernel(struct perf_event *event)
{
- put_event(event);
- return 0;
-}
-EXPORT_SYMBOL_GPL(perf_event_release_kernel);
+ struct perf_event *child, *tmp;
+ LIST_HEAD(child_list);
-/*
- * Called when the last reference to the file is gone.
- */
-static int perf_release(struct inode *inode, struct file *file)
-{
- put_event(file->private_data);
- return 0;
-}
+ if (!is_kernel_event(event))
+ perf_remove_from_owner(event);
-/*
- * Remove all orphanes events from the context.
- */
-static void orphans_remove_work(struct work_struct *work)
-{
- struct perf_event_context *ctx;
- struct perf_event *event, *tmp;
+ event->owner = NULL;
- ctx = container_of(work, struct perf_event_context,
- orphans_remove.work);
+ /*
+ * event::child_mutex nests inside ctx::lock, so move children
+ * to a safe place first and avoid inversion
+ */
+ mutex_lock(&event->child_mutex);
+ list_splice_init(&event->child_list, &child_list);
+ mutex_unlock(&event->child_mutex);
- mutex_lock(&ctx->mutex);
- list_for_each_entry_safe(event, tmp, &ctx->event_list, event_entry) {
- struct perf_event *parent_event = event->parent;
+ list_for_each_entry_safe(child, tmp, &child_list, child_list) {
+ struct perf_event_context *ctx;
- if (!is_orphaned_child(event))
- continue;
+ /*
+ * This is somewhat similar to perf_free_event(),
+ * except that these events are alive and need
+ * proper perf_remove_from_context().
+ */
+ ctx = perf_event_ctx_lock(child);
+ perf_remove_from_context(child, true);
+ perf_event_ctx_unlock(child, ctx);
- perf_remove_from_context(event, true);
+ list_del(&child->child_list);
- mutex_lock(&parent_event->child_mutex);
- list_del_init(&event->child_list);
- mutex_unlock(&parent_event->child_mutex);
+ /* Children will have exactly one reference */
+ free_event(child);
- free_event(event);
- put_event(parent_event);
+ /*
+ * This matches the refcount bump in inherit_event();
+ * this can't be the last reference.
+ */
+ put_event(event);
}
- raw_spin_lock_irq(&ctx->lock);
- ctx->orphans_remove_sched = false;
- raw_spin_unlock_irq(&ctx->lock);
- mutex_unlock(&ctx->mutex);
+ /* Must be the last reference */
+ put_event(event);
+ return 0;
+}
+EXPORT_SYMBOL_GPL(perf_event_release_kernel);
- put_ctx(ctx);
+/*
+ * Called when the last reference to the file is gone.
+ */
+static int perf_release(struct inode *inode, struct file *file)
+{
+ perf_event_release_kernel(file->private_data);
+ return 0;
}
u64 perf_event_read_value(struct perf_event *event, u64 *enabled, u64 *running)
--
2.7.0.rc3