Re: [tip:sched/core] sched/rt: Simplify the IPI based RT balancing logic

From: Steven Rostedt
Date: Fri Jan 19 2018 - 13:11:35 EST


On Fri, 19 Jan 2018 23:16:17 +0530
Pavan Kondeti <pkondeti@xxxxxxxxxxxxxx> wrote:

> I am thinking of another problem because of the race between
> rto_push_irq_work_func() and rq_attach_root() where rq->rd is modified.
>
> Let's say we cache the rq->rd here and queue the IRQ work on a remote
> CPU. In the meantime, rq_attach_root() might drop all the references
> to this cached (old) rd and want to free it. The rq->rd is freed in an
> RCU-sched callback. If that remote CPU is in an RCU quiescent state, the rq->rd
> can get freed before the IRQ work is executed. This results in corruption
> of the remote CPU's IRQ work list. Right?
>
> Taking rq->lock in rto_push_irq_work_func() also does not help here. Probably
> we have to wait for the IRQ work to finish before freeing the old root domain
> in the RCU-sched callback.

I was wondering about this too. Yeah, it would require an RCU-like
update. Once the rd is unreferenced, we would need to wait for the
irq works to finish before freeing it.
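
If I'm following the scenario correctly, it is something like this:

	tell_cpu_to_push() (or the irq work itself)
	  reads rq->rd and does
	  irq_work_queue_on(&rd->rto_push_work, cpu)

	rq_attach_root() on that rq (elsewhere)
	  replaces rq->rd and drops the last reference to the old rd
	  call_rcu_sched(&old_rd->rcu, free_rootdomain)

	the target CPU passes through a quiescent state before it runs
	the queued irq work, the grace period ends, and free_rootdomain()
	frees the rd together with the rto_push_work that is still sitting
	on that CPU's irq_work list

So the rd needs to stay pinned for as long as its rto_push_work can
still be queued or running somewhere.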

The easy way to do this is to simply up the refcount on the root domain
when passing it around via the IPIs. Something like this:

diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index 862a513adca3..89a086ed2b16 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -1907,9 +1907,8 @@ static void push_rt_tasks(struct rq *rq)
  * the rt_loop_next will cause the iterator to perform another scan.
  *
  */
-static int rto_next_cpu(struct rq *rq)
+static int rto_next_cpu(struct root_domain *rd)
 {
-	struct root_domain *rd = rq->rd;
 	int next;
 	int cpu;
 
@@ -1985,19 +1984,24 @@ static void tell_cpu_to_push(struct rq *rq)
 	 * Otherwise it is finishing up and an ipi needs to be sent.
 	 */
 	if (rq->rd->rto_cpu < 0)
-		cpu = rto_next_cpu(rq);
+		cpu = rto_next_cpu(rq->rd);
 
 	raw_spin_unlock(&rq->rd->rto_lock);
 
 	rto_start_unlock(&rq->rd->rto_loop_start);
 
-	if (cpu >= 0)
+	if (cpu >= 0) {
+		/* Make sure the rd does not get freed while pushing */
+		sched_get_rd(rq->rd);
 		irq_work_queue_on(&rq->rd->rto_push_work, cpu);
+	}
 }
 
 /* Called from hardirq context */
 void rto_push_irq_work_func(struct irq_work *work)
 {
+	struct root_domain *rd =
+		container_of(work, struct root_domain, rto_push_work);
 	struct rq *rq;
 	int cpu;
 
@@ -2013,18 +2017,20 @@ void rto_push_irq_work_func(struct irq_work *work)
 		raw_spin_unlock(&rq->lock);
 	}
 
-	raw_spin_lock(&rq->rd->rto_lock);
+	raw_spin_lock(&rd->rto_lock);
 
 	/* Pass the IPI to the next rt overloaded queue */
-	cpu = rto_next_cpu(rq);
+	cpu = rto_next_cpu(rd);
 
-	raw_spin_unlock(&rq->rd->rto_lock);
+	raw_spin_unlock(&rd->rto_lock);
 
-	if (cpu < 0)
+	if (cpu < 0) {
+		sched_put_rd(rd);
 		return;
+	}
 
 	/* Try the next RT overloaded CPU */
-	irq_work_queue_on(&rq->rd->rto_push_work, cpu);
+	irq_work_queue_on(&rd->rto_push_work, cpu);
 }
 #endif /* HAVE_RT_PUSH_IPI */
 
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 2e95505e23c6..fb5fc458547f 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -691,6 +691,8 @@ extern struct mutex sched_domains_mutex;
 extern void init_defrootdomain(void);
 extern int sched_init_domains(const struct cpumask *cpu_map);
 extern void rq_attach_root(struct rq *rq, struct root_domain *rd);
+extern void sched_get_rd(struct root_domain *rd);
+extern void sched_put_rd(struct root_domain *rd);
 
 #ifdef HAVE_RT_PUSH_IPI
 extern void rto_push_irq_work_func(struct irq_work *work);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 034cbed7f88b..519b024f4e94 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -259,6 +259,19 @@ void rq_attach_root(struct rq *rq, struct root_domain *rd)
 		call_rcu_sched(&old_rd->rcu, free_rootdomain);
 }
 
+void sched_get_rd(struct root_domain *rd)
+{
+	atomic_inc(&rd->refcount);
+}
+
+void sched_put_rd(struct root_domain *rd)
+{
+	if (!atomic_dec_and_test(&rd->refcount))
+		return;
+
+	call_rcu_sched(&rd->rcu, free_rootdomain);
+}
+
 static int init_rootdomain(struct root_domain *rd)
 {
 	if (!zalloc_cpumask_var(&rd->span, GFP_KERNEL))
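
In other words, the lifecycle this is going for (just restating the
patch, not more code to apply):

	tell_cpu_to_push()
		sched_get_rd(rq->rd)	/* pin the rd before queueing the work */
		irq_work_queue_on(&rd->rto_push_work, cpu)

	rto_push_irq_work_func()	/* runs on each RT overloaded CPU in turn */
		cpu = rto_next_cpu(rd)
		cpu >= 0: irq_work_queue_on(&rd->rto_push_work, cpu)
		cpu <  0: sched_put_rd(rd)	/* loop finished, drop the pin */

	sched_put_rd() / rq_attach_root()
		last reference dropped -> call_rcu_sched(&rd->rcu, free_rootdomain)

That way the rd (and the rto_push_work embedded in it) can not go away
while the irq work is still queued anywhere, and the actual free still
waits for an RCU-sched grace period.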

-- Steve