[PATCH 3.12 041/116] sched: Replace post_schedule with a balance callback list

From: Jiri Slaby
Date: Fri Mar 04 2016 - 04:40:57 EST


From: Peter Zijlstra <peterz@xxxxxxxxxxxxx>

3.12-stable review patch. If anyone has any objections, please let me know.

===============

commit e3fca9e7cbfb72694a21c886fcdf9f059cfded9c upstream.

Generalize the post_schedule() machinery into a balance callback list.
This allows us to use it more easily outside of schedule() and across
sched_classes.

Signed-off-by: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: ktkhai@xxxxxxxxxxxxx
Cc: rostedt@xxxxxxxxxxx
Cc: juri.lelli@xxxxxxxxx
Cc: pang.xunlei@xxxxxxxxxx
Cc: oleg@xxxxxxxxxx
Cc: wanpeng.li@xxxxxxxxxxxxxxx
Cc: umgwanakikbuti@xxxxxxxxx
Link: http://lkml.kernel.org/r/20150611124742.424032725@xxxxxxxxxxxxx
Signed-off-by: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Signed-off-by: Byungchul Park <byungchul.park@xxxxxxx>
Signed-off-by: Jiri Slaby <jslaby@xxxxxxx>
---
 kernel/sched/core.c  | 36 ++++++++++++++++++++++++------------
 kernel/sched/rt.c    | 27 ++++++++++++++++-----------
 kernel/sched/sched.h | 19 +++++++++++++++++--
 3 files changed, 57 insertions(+), 25 deletions(-)
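
Review note: the pattern this patch introduces is small enough to show in
isolation, so here is a minimal user-space sketch of it. This is an
illustration only, not kernel code: "struct rq" is a one-field stand-in,
the rq->lock handling is reduced to comments, demo_push() and main() are
made-up drivers, and ->func is typed directly as void (*)(struct rq *) so
the function-pointer casts seen in the patch disappear.

/*
 * Sketch of the balance-callback pattern: sched classes queue a
 * per-CPU callback_head onto rq->balance_callback while holding
 * rq->lock; schedule() drains the list once it is safe to re-take
 * the lock.
 */
#include <stdio.h>
#include <stddef.h>

struct rq;

struct callback_head {
	struct callback_head *next;
	void (*func)(struct rq *rq);
};

struct rq {
	struct callback_head *balance_callback;	/* head of the list */
};

/* In the kernel this runs under rq->lock (lockdep_assert_held). */
static void queue_balance_callback(struct rq *rq,
				   struct callback_head *head,
				   void (*func)(struct rq *rq))
{
	if (head->next)		/* non-NULL ->next: already queued */
		return;

	head->func = func;
	head->next = rq->balance_callback;
	rq->balance_callback = head;
}

/* Mirrors __balance_callback(): detach the list, run every entry. */
static void balance_callback(struct rq *rq)
{
	struct callback_head *head, *next;

	/* kernel: raw_spin_lock_irqsave(&rq->lock, flags); */
	head = rq->balance_callback;
	rq->balance_callback = NULL;
	while (head) {
		void (*func)(struct rq *rq) = head->func;

		next = head->next;
		head->next = NULL;	/* entry may be requeued by func */
		head = next;

		func(rq);
	}
	/* kernel: raw_spin_unlock_irqrestore(&rq->lock, flags); */
}

/* Stand-in for rt.c's push_rt_tasks() callback. */
static void demo_push(struct rq *rq)
{
	(void)rq;
	printf("balance callback ran\n");
}

int main(void)
{
	struct rq rq = { .balance_callback = NULL };
	struct callback_head head = { .next = NULL };

	queue_balance_callback(&rq, &head, demo_push);	/* pick_next_task */
	balance_callback(&rq);				/* end of schedule() */

	queue_balance_callback(&rq, &head, demo_push);	/* can requeue ... */
	balance_callback(&rq);				/* ... and run again */
	return 0;
}

On the casts in the real patch: callback_head comes from
include/linux/types.h, where ->func takes a struct callback_head *, so
queue_balance_callback() stores the rq-typed function pointer with a cast
and __balance_callback() casts it back before calling it, rather than
defining a new struct as the sketch above does.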

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 0bcdceaca6e2..7bf52708993e 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1911,18 +1911,30 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *prev)
 }
 
 /* rq->lock is NOT held, but preemption is disabled */
-static inline void post_schedule(struct rq *rq)
+static void __balance_callback(struct rq *rq)
 {
-	if (rq->post_schedule) {
-		unsigned long flags;
+	struct callback_head *head, *next;
+	void (*func)(struct rq *rq);
+	unsigned long flags;
 
-		raw_spin_lock_irqsave(&rq->lock, flags);
-		if (rq->curr->sched_class->post_schedule)
-			rq->curr->sched_class->post_schedule(rq);
-		raw_spin_unlock_irqrestore(&rq->lock, flags);
+	raw_spin_lock_irqsave(&rq->lock, flags);
+	head = rq->balance_callback;
+	rq->balance_callback = NULL;
+	while (head) {
+		func = (void (*)(struct rq *))head->func;
+		next = head->next;
+		head->next = NULL;
+		head = next;
 
-		rq->post_schedule = 0;
+		func(rq);
 	}
+	raw_spin_unlock_irqrestore(&rq->lock, flags);
+}
+
+static inline void balance_callback(struct rq *rq)
+{
+	if (unlikely(rq->balance_callback))
+		__balance_callback(rq);
 }
 
 #else
@@ -1931,7 +1943,7 @@ static inline void pre_schedule(struct rq *rq, struct task_struct *p)
 {
 }
 
-static inline void post_schedule(struct rq *rq)
+static inline void balance_callback(struct rq *rq)
 {
 }
 
@@ -1952,7 +1964,7 @@ asmlinkage void schedule_tail(struct task_struct *prev)
 	 * FIXME: do we need to worry about rq being invalidated by the
 	 * task_switch?
 	 */
-	post_schedule(rq);
+	balance_callback(rq);
 
 #ifdef __ARCH_WANT_UNLOCKED_CTXSW
 	/* In this case, finish_task_switch does not reenable preemption */
@@ -2449,7 +2461,7 @@ need_resched:
 	} else
 		raw_spin_unlock_irq(&rq->lock);
 
-	post_schedule(rq);
+	balance_callback(rq);
 
 	sched_preempt_enable_no_resched();
 	if (need_resched())
@@ -6516,7 +6528,7 @@ void __init sched_init(void)
 		rq->sd = NULL;
 		rq->rd = NULL;
 		rq->cpu_power = SCHED_POWER_SCALE;
-		rq->post_schedule = 0;
+		rq->balance_callback = NULL;
 		rq->active_balance = 0;
 		rq->next_balance = jiffies;
 		rq->push_cpu = 0;
diff --git a/kernel/sched/rt.c b/kernel/sched/rt.c
index e849d4070c7f..25f6d7ae589d 100644
--- a/kernel/sched/rt.c
+++ b/kernel/sched/rt.c
@@ -315,6 +315,18 @@ static inline int has_pushable_tasks(struct rq *rq)
 	return !plist_head_empty(&rq->rt.pushable_tasks);
 }
 
+static DEFINE_PER_CPU(struct callback_head, rt_balance_head);
+
+static void push_rt_tasks(struct rq *);
+
+static inline void queue_push_tasks(struct rq *rq)
+{
+	if (!has_pushable_tasks(rq))
+		return;
+
+	queue_balance_callback(rq, &per_cpu(rt_balance_head, rq->cpu), push_rt_tasks);
+}
+
 static void enqueue_pushable_task(struct rq *rq, struct task_struct *p)
 {
 	plist_del(&p->pushable_tasks, &rq->rt.pushable_tasks);
@@ -359,6 +371,9 @@ void dec_rt_migration(struct sched_rt_entity *rt_se, struct rt_rq *rt_rq)
 {
 }
 
+static inline void queue_push_tasks(struct rq *rq)
+{
+}
 #endif /* CONFIG_SMP */
 
 static inline int on_rt_rq(struct sched_rt_entity *rt_se)
@@ -1344,11 +1359,7 @@ static struct task_struct *pick_next_task_rt(struct rq *rq)
 	dequeue_pushable_task(rq, p);
 
 #ifdef CONFIG_SMP
-	/*
-	 * We detect this state here so that we can avoid taking the RQ
-	 * lock again later if there is no need to push
-	 */
-	rq->post_schedule = has_pushable_tasks(rq);
+	queue_push_tasks(rq);
 #endif
 
 	return p;
@@ -1726,11 +1737,6 @@ static void pre_schedule_rt(struct rq *rq, struct task_struct *prev)
 		pull_rt_task(rq);
 }
 
-static void post_schedule_rt(struct rq *rq)
-{
-	push_rt_tasks(rq);
-}
-
 /*
  * If we are not running and we are not going to reschedule soon, we should
  * try to push tasks away now
@@ -2003,7 +2009,6 @@ const struct sched_class rt_sched_class = {
 	.rq_online              = rq_online_rt,
 	.rq_offline             = rq_offline_rt,
 	.pre_schedule		= pre_schedule_rt,
-	.post_schedule		= post_schedule_rt,
 	.task_woken		= task_woken_rt,
 	.switched_from		= switched_from_rt,
 #endif
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 1a1cdc3783ed..e09e3e0466f7 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -460,9 +460,10 @@ struct rq {
 
 	unsigned long cpu_power;
 
+	struct callback_head *balance_callback;
+
 	unsigned char idle_balance;
 	/* For active balancing */
-	int post_schedule;
 	int active_balance;
 	int push_cpu;
 	struct cpu_stop_work active_balance_work;
@@ -554,6 +555,21 @@ static inline u64 rq_clock_task(struct rq *rq)
 
 #ifdef CONFIG_SMP
 
+static inline void
+queue_balance_callback(struct rq *rq,
+		       struct callback_head *head,
+		       void (*func)(struct rq *rq))
+{
+	lockdep_assert_held(&rq->lock);
+
+	if (unlikely(head->next))
+		return;
+
+	head->func = (void (*)(struct callback_head *))func;
+	head->next = rq->balance_callback;
+	rq->balance_callback = head;
+}
+
 #define rcu_dereference_check_sched_domain(p) \
 	rcu_dereference_check((p), \
 			      lockdep_is_held(&sched_domains_mutex))
@@ -981,7 +997,6 @@ struct sched_class {
 	void (*migrate_task_rq)(struct task_struct *p, int next_cpu);
 
 	void (*pre_schedule) (struct rq *this_rq, struct task_struct *task);
-	void (*post_schedule) (struct rq *this_rq);
 	void (*task_waking) (struct task_struct *task);
 	void (*task_woken) (struct rq *this_rq, struct task_struct *task);
 
--
2.7.2