[PATCH v2 1/6] perf/core: Fix inconsistency between cgroup sched_out and sched_in

From: Chengming Zhou
Date: Tue Mar 22 2022 - 08:10:04 EST


There is a race problem that can trigger WARN_ON_ONCE(cpuctx->cgrp)
in perf_cgroup_switch().

CPU1						CPU2
(in context_switch)				(attach running task)

perf_cgroup_sched_out(prev, next)
	cgrp1 == cgrp2 is True
						next->cgroups = cgrp3
						perf_cgroup_attach()
perf_cgroup_sched_in(prev, next)
	cgrp1 == cgrp3 is False

Commit a8d757ef076f ("perf events: Fix slow and broken cgroup
context switch code") skips the cpuctx sched out/in when the
perf_cgroups of "prev" and "next" are the same.

But the perf_cgroup of a task can change concurrently with the
context switch. If cgrp1 == cgrp2 in sched_out(), cpuctx is not
switched out. If the task's perf_cgroup then changes so that
cgrp1 != cgrp2 in sched_in(), cpuctx is switched in and triggers
WARN_ON_ONCE(cpuctx->cgrp).
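
The root cause is that sched_out and sched_in each do their own read
of task->cgroups, so the two compares can disagree. Below is a minimal
userspace sketch of that interleaving (illustrative only, not kernel
code; struct cgroup and task_cgroups are made-up stand-ins):

  #include <stdio.h>

  struct cgroup { int id; };

  static struct cgroup cgrp2 = { .id = 2 }, cgrp3 = { .id = 3 };
  static struct cgroup *task_cgroups = &cgrp2;  /* stand-in for next->cgroups */

  int main(void)
  {
          struct cgroup *prev_cgrp = &cgrp2;    /* stand-in for prev's cgroup */

          /* sched_out: cgrp1 == cgrp2 is True, so cpuctx is NOT switched out */
          int switched_out = (prev_cgrp != task_cgroups);

          /* CPU2: perf_cgroup_attach() moves the running task */
          task_cgroups = &cgrp3;

          /* sched_in: cgrp1 == cgrp3 is False, so cpuctx IS switched in */
          int switched_in = (prev_cgrp != task_cgroups);

          if (!switched_out && switched_in)
                  printf("sched_in without sched_out -> WARN_ON_ONCE(cpuctx->cgrp)\n");
          return 0;
  }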

Even though perf_cgroup_switch() is serialized against the context
switch by the disabled interrupts, the context switch can still
observe task->cgroups changing in the middle, between sched_out and
sched_in.

So this patch combines perf_cgroup_sched_in() into
perf_cgroup_sched_out() and renames the result to
perf_cgroup_sched_switch(), fixing the inconsistency between cgroup
sched_out and sched_in.
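
With the combined helper, a single compare decides both halves, so
they can no longer disagree. The same sketch with the fix applied
(again only an illustration of the control flow, not the kernel
implementation):

  #include <stdio.h>

  struct cgroup { int id; };

  static struct cgroup cgrp2 = { .id = 2 }, cgrp3 = { .id = 3 };
  static struct cgroup *task_cgroups = &cgrp2;  /* stand-in for next->cgroups */

  int main(void)
  {
          struct cgroup *prev_cgrp = &cgrp2;

          /*
           * perf_cgroup_sched_switch(): a single compare decides both the
           * sched out and the sched in, so the two halves always agree.
           */
          if (prev_cgrp != task_cgroups) {
                  /* perf_cgroup_switch(): out, set cpuctx->cgrp, in */
          }

          /* a concurrent attach can now only affect the next context switch */
          task_cgroups = &cgrp3;

          printf("out and in are decided together, no inconsistent cpuctx\n");
          return 0;
  }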

Fixes: a8d757ef076f ("perf events: Fix slow and broken cgroup context switch code")
Signed-off-by: Chengming Zhou <zhouchengming@xxxxxxxxxxxxx>
---
kernel/events/core.c | 101 ++++++++++---------------------------------
1 file changed, 24 insertions(+), 77 deletions(-)

diff --git a/kernel/events/core.c b/kernel/events/core.c
index 6859229497b1..8b5cf2aedfe6 100644
--- a/kernel/events/core.c
+++ b/kernel/events/core.c
@@ -828,16 +828,10 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 
 static DEFINE_PER_CPU(struct list_head, cgrp_cpuctx_list);
 
-#define PERF_CGROUP_SWOUT	0x1 /* cgroup switch out every event */
-#define PERF_CGROUP_SWIN	0x2 /* cgroup switch in events based on task */
-
 /*
  * reschedule events based on the cgroup constraint of task.
- *
- * mode SWOUT : schedule out everything
- * mode SWIN : schedule in based on cgroup for next
  */
-static void perf_cgroup_switch(struct task_struct *task, int mode)
+static void perf_cgroup_switch(struct task_struct *task)
 {
 	struct perf_cpu_context *cpuctx, *tmp;
 	struct list_head *list;
@@ -856,28 +850,22 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 		perf_ctx_lock(cpuctx, cpuctx->task_ctx);
 		perf_pmu_disable(cpuctx->ctx.pmu);
 
-		if (mode & PERF_CGROUP_SWOUT) {
-			cpu_ctx_sched_out(cpuctx, EVENT_ALL);
-			/*
-			 * must not be done before ctxswout due
-			 * to event_filter_match() in event_sched_out()
-			 */
-			cpuctx->cgrp = NULL;
-		}
+		cpu_ctx_sched_out(cpuctx, EVENT_ALL);
+		/*
+		 * must not be done before ctxswout due
+		 * to event_filter_match() in event_sched_out()
+		 */
+		cpuctx->cgrp = perf_cgroup_from_task(task,
+						     &cpuctx->ctx);
+		/*
+		 * set cgrp before ctxsw in to allow
+		 * event_filter_match() to not have to pass
+		 * task around
+		 * we pass the cpuctx->ctx to perf_cgroup_from_task()
+		 * because cgroup events are only per-cpu
+		 */
+		cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
 
-		if (mode & PERF_CGROUP_SWIN) {
-			WARN_ON_ONCE(cpuctx->cgrp);
-			/*
-			 * set cgrp before ctxsw in to allow
-			 * event_filter_match() to not have to pass
-			 * task around
-			 * we pass the cpuctx->ctx to perf_cgroup_from_task()
-			 * because cgorup events are only per-cpu
-			 */
-			cpuctx->cgrp = perf_cgroup_from_task(task,
-							     &cpuctx->ctx);
-			cpu_ctx_sched_in(cpuctx, EVENT_ALL, task);
-		}
 		perf_pmu_enable(cpuctx->ctx.pmu);
 		perf_ctx_unlock(cpuctx, cpuctx->task_ctx);
 	}
@@ -885,8 +873,8 @@ static void perf_cgroup_switch(struct task_struct *task, int mode)
 	local_irq_restore(flags);
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task,
-					 struct task_struct *next)
+static inline void perf_cgroup_sched_switch(struct task_struct *task,
+					    struct task_struct *next)
 {
 	struct perf_cgroup *cgrp1;
 	struct perf_cgroup *cgrp2 = NULL;
@@ -906,33 +894,7 @@ static inline void perf_cgroup_sched_out(struct task_struct *task,
 	 * do no touch the cgroup events.
 	 */
 	if (cgrp1 != cgrp2)
-		perf_cgroup_switch(task, PERF_CGROUP_SWOUT);
-
-	rcu_read_unlock();
-}
-
-static inline void perf_cgroup_sched_in(struct task_struct *prev,
-					struct task_struct *task)
-{
-	struct perf_cgroup *cgrp1;
-	struct perf_cgroup *cgrp2 = NULL;
-
-	rcu_read_lock();
-	/*
-	 * we come here when we know perf_cgroup_events > 0
-	 * we do not need to pass the ctx here because we know
-	 * we are holding the rcu lock
-	 */
-	cgrp1 = perf_cgroup_from_task(task, NULL);
-	cgrp2 = perf_cgroup_from_task(prev, NULL);
-
-	/*
-	 * only need to schedule in cgroup events if we are changing
-	 * cgroup during ctxsw. Cgroup events were not scheduled
-	 * out of ctxsw out if that was not the case.
-	 */
-	if (cgrp1 != cgrp2)
-		perf_cgroup_switch(task, PERF_CGROUP_SWIN);
+		perf_cgroup_switch(task);
 
 	rcu_read_unlock();
 }
@@ -1100,13 +1062,8 @@ static inline void update_cgrp_time_from_cpuctx(struct perf_cpu_context *cpuctx,
 {
 }
 
-static inline void perf_cgroup_sched_out(struct task_struct *task,
-					 struct task_struct *next)
-{
-}
-
-static inline void perf_cgroup_sched_in(struct task_struct *prev,
-					struct task_struct *task)
+static inline void perf_cgroup_sched_switch(struct task_struct *task,
+					    struct task_struct *next)
 {
 }
 
@@ -1124,7 +1081,7 @@ perf_cgroup_set_timestamp(struct task_struct *task,
 }
 
 static inline void
-perf_cgroup_switch(struct task_struct *task, struct task_struct *next)
+perf_cgroup_sched_switch(struct task_struct *task, struct task_struct *next)
 {
 }
 
@@ -3668,7 +3625,7 @@ void __perf_event_task_sched_out(struct task_struct *task,
 	 * cgroup event are system-wide mode only
	 */
 	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-		perf_cgroup_sched_out(task, next);
+		perf_cgroup_sched_switch(task, next);
 }
 
 /*
@@ -3984,16 +3941,6 @@ void __perf_event_task_sched_in(struct task_struct *prev,
 	struct perf_event_context *ctx;
 	int ctxn;
 
-	/*
-	 * If cgroup events exist on this CPU, then we need to check if we have
-	 * to switch in PMU state; cgroup event are system-wide mode only.
-	 *
-	 * Since cgroup events are CPU events, we must schedule these in before
-	 * we schedule in the task events.
-	 */
-	if (atomic_read(this_cpu_ptr(&perf_cgroup_events)))
-		perf_cgroup_sched_in(prev, task);
-
 	for_each_task_context_nr(ctxn) {
 		ctx = task->perf_event_ctxp[ctxn];
 		if (likely(!ctx))
@@ -13564,7 +13511,7 @@ static int __perf_cgroup_move(void *info)
 {
 	struct task_struct *task = info;
 	rcu_read_lock();
-	perf_cgroup_switch(task, PERF_CGROUP_SWOUT | PERF_CGROUP_SWIN);
+	perf_cgroup_switch(task);
 	rcu_read_unlock();
 	return 0;
 }
--
2.20.1