[GIT PULL] scheduler fixes

From: Ingo Molnar
Date: Wed Sep 13 2017 - 13:57:54 EST


Linus,

Please pull the latest sched-urgent-for-linus git tree from:

git://git.kernel.org/pub/scm/linux/kernel/git/tip/tip.git sched-urgent-for-linus

# HEAD: 9469eb01db891b55367ee7539f1b9f7f6fd2819d sched/debug: Add debugfs knob for "sched_debug"

Three CPU-hotplug-related fixes and a debugging improvement.
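
The debugfs knob makes "sched_debug" togglable at runtime rather than
only via the boot parameter: with debugfs mounted at the conventional
/sys/kernel/debug, enabling it amounts to writing "1" (or "Y") to
/sys/kernel/debug/sched_debug. A minimal userspace sketch, for
illustration only (the path and the accepted Y/N/1/0 values follow from
the debugfs_create_bool() call in the patch below):

  #include <stdio.h>

  int main(void)
  {
          /* Path assumes the conventional debugfs mount point. */
          FILE *f = fopen("/sys/kernel/debug/sched_debug", "w");

          if (!f) {
                  perror("sched_debug"); /* no debugfs, or knob not present */
                  return 1;
          }
          fputs("1\n", f); /* enable sched_domain debug output */
          return fclose(f) ? 1 : 0;
  }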

Thanks,

Ingo

------------------>
Peter Zijlstra (4):
      sched/fair: Avoid newidle balance for !active CPUs
      sched/fair: Plug hole between hotplug and active_load_balance()
      sched/core: WARN() when migrating to an offline CPU
      sched/debug: Add debugfs knob for "sched_debug"


 kernel/sched/core.c     |  4 ++++
 kernel/sched/debug.c    |  5 +++++
 kernel/sched/fair.c     | 13 +++++++++++++
 kernel/sched/sched.h    |  2 ++
 kernel/sched/topology.c |  4 +---
 5 files changed, 25 insertions(+), 3 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index 136a76d80dbf..18a6966567da 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -1173,6 +1173,10 @@ void set_task_cpu(struct task_struct *p, unsigned int new_cpu)
 	WARN_ON_ONCE(debug_locks && !(lockdep_is_held(&p->pi_lock) ||
 				      lockdep_is_held(&task_rq(p)->lock)));
 #endif
+	/*
+	 * Clearly, migrating tasks to offline CPUs is a fairly daft thing.
+	 */
+	WARN_ON_ONCE(!cpu_online(new_cpu));
 #endif
 
 	trace_sched_migrate_task(p, new_cpu);
diff --git a/kernel/sched/debug.c b/kernel/sched/debug.c
index 4a23bbc3111b..b19d06ea6e10 100644
--- a/kernel/sched/debug.c
+++ b/kernel/sched/debug.c
@@ -181,11 +181,16 @@ static const struct file_operations sched_feat_fops = {
 	.release	= single_release,
 };
 
+__read_mostly bool sched_debug_enabled;
+
 static __init int sched_init_debug(void)
 {
 	debugfs_create_file("sched_features", 0644, NULL, NULL,
 			&sched_feat_fops);
 
+	debugfs_create_bool("sched_debug", 0644, NULL,
+			&sched_debug_enabled);
+
 	return 0;
 }
 late_initcall(sched_init_debug);
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index 8415d1ec2b84..efeebed935ae 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -8448,6 +8448,12 @@ static int idle_balance(struct rq *this_rq, struct rq_flags *rf)
 	this_rq->idle_stamp = rq_clock(this_rq);
 
 	/*
+	 * Do not pull tasks towards !active CPUs...
+	 */
+	if (!cpu_active(this_cpu))
+		return 0;
+
+	/*
 	 * This is OK, because current is on_cpu, which avoids it being picked
 	 * for load-balance and preemption/IRQs are still disabled avoiding
 	 * further scheduler activity on it and we're being very careful to
@@ -8554,6 +8560,13 @@ static int active_load_balance_cpu_stop(void *data)
 	struct rq_flags rf;
 
 	rq_lock_irq(busiest_rq, &rf);
+	/*
+	 * Between queueing the stop-work and running it is a hole in which
+	 * CPUs can become inactive. We should not move tasks from or to
+	 * inactive CPUs.
+	 */
+	if (!cpu_active(busiest_cpu) || !cpu_active(target_cpu))
+		goto out_unlock;
 
 	/* make sure the requested cpu hasn't gone down in the meantime */
 	if (unlikely(busiest_cpu != smp_processor_id() ||
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index ab1c7f5409a0..7ea2a0339771 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1954,6 +1954,8 @@ extern struct sched_entity *__pick_first_entity(struct cfs_rq *cfs_rq);
 extern struct sched_entity *__pick_last_entity(struct cfs_rq *cfs_rq);
 
 #ifdef CONFIG_SCHED_DEBUG
+extern bool sched_debug_enabled;
+
 extern void print_cfs_stats(struct seq_file *m, int cpu);
 extern void print_rt_stats(struct seq_file *m, int cpu);
 extern void print_dl_stats(struct seq_file *m, int cpu);
diff --git a/kernel/sched/topology.c b/kernel/sched/topology.c
index 6f7b43982f73..2ab2aa68c796 100644
--- a/kernel/sched/topology.c
+++ b/kernel/sched/topology.c
@@ -14,11 +14,9 @@ cpumask_var_t sched_domains_tmpmask2;

 #ifdef CONFIG_SCHED_DEBUG
 
-static __read_mostly int sched_debug_enabled;
-
 static int __init sched_debug_setup(char *str)
 {
-	sched_debug_enabled = 1;
+	sched_debug_enabled = true;
 
 	return 0;
 }
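
For context (not part of this pull): sched_debug_enabled gates the
sched_domain debug printout in kernel/sched/topology.c, which bails out
early when the knob is off, roughly:

  static void sched_domain_debug(struct sched_domain *sd, int cpu)
  {
          if (!sched_debug_enabled)
                  return;

          /* ... walk and print the sched_domain hierarchy of this CPU ... */
  }

So flipping the debugfs file takes effect the next time the domains are
rebuilt, e.g. on CPU hotplug.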