[PATCH v3 04/10] sched/fair: Dynamically update cfs_overload_cpus

From: Steve Sistare
Date: Fri Nov 09 2018 - 08:00:46 EST


An overloaded CPU has more than one runnable CFS task. When a CFS task wakes
on a CPU, if h_nr_running transitions from 1 to 2 or more, set the CPU in
the cfs_overload_cpus bitmap. When a CFS task sleeps, if h_nr_running
transitions from 2 or more to fewer than 2, clear the CPU in cfs_overload_cpus.

Signed-off-by: Steve Sistare <steven.sistare@xxxxxxxxxx>
---
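For review purposes, the set/clear rule can be modelled by the standalone
sketch below (plain userspace C; the unsigned long mask, NR_CPUS and the
model_enqueue()/model_dequeue() helpers are illustrative stand-ins for the
sparsemask API and the fair.c paths, not kernel code). The same prev_nr
checks appear in the enqueue/dequeue and throttle/unthrottle paths in the
patch, where delta may exceed 1.

/*
 * Standalone model of the overload-tracking rule, for illustration only.
 * An unsigned long stands in for the per-LLC cfs_overload_cpus sparsemask,
 * and h_nr_running[] stands in for rq->cfs.h_nr_running.
 */
#include <assert.h>
#include <stdio.h>

#define NR_CPUS 8

static unsigned long overload_mask;         /* stand-in for cfs_overload_cpus */
static unsigned int h_nr_running[NR_CPUS];  /* runnable CFS tasks per CPU */

static void model_enqueue(int cpu, unsigned int delta)
{
        unsigned int prev_nr = h_nr_running[cpu];

        h_nr_running[cpu] += delta;
        if (prev_nr < 2 && prev_nr + delta >= 2)   /* crossed into overload */
                overload_mask |= 1UL << cpu;       /* overload_set()        */
}

static void model_dequeue(int cpu, unsigned int delta)
{
        unsigned int prev_nr = h_nr_running[cpu];

        h_nr_running[cpu] -= delta;
        if (prev_nr >= 2 && prev_nr - delta < 2)   /* dropped below 2       */
                overload_mask &= ~(1UL << cpu);    /* overload_clear()      */
}

int main(void)
{
        model_enqueue(3, 1);                       /* 0 -> 1: not overloaded */
        assert(!(overload_mask & (1UL << 3)));
        model_enqueue(3, 1);                       /* 1 -> 2: overloaded     */
        assert(overload_mask & (1UL << 3));
        model_dequeue(3, 1);                       /* 2 -> 1: cleared        */
        assert(!(overload_mask & (1UL << 3)));
        printf("final mask: %#lx\n", overload_mask);
        return 0;
}
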
kernel/sched/fair.c | 52 ++++++++++++++++++++++++++++++++++++++++++++++++----
1 file changed, 48 insertions(+), 4 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index ee271bb..9031d39 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -23,6 +23,7 @@
#include "sched.h"

#include <trace/events/sched.h>
+#include <linux/sparsemask.h>

/*
* Targeted preemption latency for CPU-bound tasks:
@@ -3724,6 +3725,28 @@ static inline void update_misfit_status(struct task_struct *p, struct rq *rq)
rq->misfit_task_load = task_h_load(p);
}

+static void overload_clear(struct rq *rq)
+{
+ struct sparsemask *overload_cpus;
+
+ rcu_read_lock();
+ overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
+ if (overload_cpus)
+ sparsemask_clear_elem(rq->cpu, overload_cpus);
+ rcu_read_unlock();
+}
+
+static void overload_set(struct rq *rq)
+{
+ struct sparsemask *overload_cpus;
+
+ rcu_read_lock();
+ overload_cpus = rcu_dereference(rq->cfs_overload_cpus);
+ if (overload_cpus)
+ sparsemask_set_elem(rq->cpu, overload_cpus);
+ rcu_read_unlock();
+}
+
#else /* CONFIG_SMP */

#define UPDATE_TG 0x0
@@ -3747,6 +3770,9 @@ static inline int idle_balance(struct rq *rq, struct rq_flags *rf)
return 0;
}

+static inline void overload_clear(struct rq *rq) {}
+static inline void overload_set(struct rq *rq) {}
+
static inline void
util_est_enqueue(struct cfs_rq *cfs_rq, struct task_struct *p) {}

@@ -4441,6 +4467,7 @@ static int tg_throttle_down(struct task_group *tg, void *data)
static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
+ unsigned int prev_nr = rq->cfs.h_nr_running;
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
long task_delta, dequeue = 1;
@@ -4468,8 +4495,12 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
dequeue = 0;
}

- if (!se)
+ if (!se) {
sub_nr_running(rq, task_delta);
+ if (prev_nr >= 2 && prev_nr - task_delta < 2)
+ overload_clear(rq);
+
+ }

cfs_rq->throttled = 1;
cfs_rq->throttled_clock = rq_clock(rq);
@@ -4499,6 +4530,7 @@ static void throttle_cfs_rq(struct cfs_rq *cfs_rq)
void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
{
struct rq *rq = rq_of(cfs_rq);
+ unsigned int prev_nr = rq->cfs.h_nr_running;
struct cfs_bandwidth *cfs_b = tg_cfs_bandwidth(cfs_rq->tg);
struct sched_entity *se;
int enqueue = 1;
@@ -4535,8 +4567,11 @@ void unthrottle_cfs_rq(struct cfs_rq *cfs_rq)
break;
}

- if (!se)
+ if (!se) {
add_nr_running(rq, task_delta);
+ if (prev_nr < 2 && prev_nr + task_delta >= 2)
+ overload_set(rq);
+ }

/* Determine whether we need to wake up potentially idle CPU: */
if (rq->curr == rq->idle && rq->cfs.nr_running)
@@ -5082,6 +5117,7 @@ static inline void hrtick_update(struct rq *rq)
{
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
+ unsigned int prev_nr = rq->cfs.h_nr_running;

/*
* The code below (indirectly) updates schedutil which looks at
@@ -5129,8 +5165,12 @@ static inline void hrtick_update(struct rq *rq)
update_cfs_group(se);
}

- if (!se)
+ if (!se) {
add_nr_running(rq, 1);
+ if (prev_nr == 1)
+ overload_set(rq);
+
+ }

hrtick_update(rq);
}
@@ -5147,6 +5187,7 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
struct cfs_rq *cfs_rq;
struct sched_entity *se = &p->se;
int task_sleep = flags & DEQUEUE_SLEEP;
+ unsigned int prev_nr = rq->cfs.h_nr_running;

for_each_sched_entity(se) {
cfs_rq = cfs_rq_of(se);
@@ -5188,8 +5229,11 @@ static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
update_cfs_group(se);
}

- if (!se)
+ if (!se) {
sub_nr_running(rq, 1);
+ if (prev_nr == 2)
+ overload_clear(rq);
+ }

util_est_dequeue(&rq->cfs, p, task_sleep);
hrtick_update(rq);
--
1.8.3.1