[RFC 20/60] sched: Let {en,de}queue_entity_fair() work with a varying number of tasks

From: Jan H. Schönherr
Date: Fri Sep 07 2018 - 17:42:02 EST


Make the task delta handled by enqueue_entity_fair() and dequeue_entity_fair()
an explicit parameter, as required by unthrottle_cfs_rq() and throttle_cfs_rq(),
which add or remove more than one task at a time. The per-task paths
enqueue_task_fair() and dequeue_task_fair() simply pass a delta of 1.
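
To illustrate the intended use, here is a sketch (not part of this patch) of
how a multi-task caller such as unthrottle_cfs_rq() could use the new
parameter, assuming se is the group entity of the cfs_rq being unthrottled:

	/*
	 * Hypothetical caller: enqueue the group entity and account for
	 * every task hanging below it in one step, instead of the
	 * constant delta of 1 used by enqueue_task_fair().
	 */
	unsigned int task_delta = cfs_rq->h_nr_running;

	if (!enqueue_entity_fair(rq, se, ENQUEUE_WAKEUP, task_delta))
		add_nr_running(rq, task_delta);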

Signed-off-by: Jan H. Schönherr <jschoenh@xxxxxxxxx>
---
 kernel/sched/fair.c  | 18 ++++++++++--------
 kernel/sched/sched.h |  6 ++++--
 2 files changed, 14 insertions(+), 10 deletions(-)

diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index a96328c5a864..f13fb4460b66 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -4979,7 +4979,8 @@ static inline void hrtick_update(struct rq *rq)
 }
 #endif
 
-bool enqueue_entity_fair(struct rq *rq, struct sched_entity *se, int flags)
+bool enqueue_entity_fair(struct rq *rq, struct sched_entity *se, int flags,
+			 unsigned int task_delta)
 {
 	struct cfs_rq *cfs_rq;
 
@@ -4997,14 +4998,14 @@ bool enqueue_entity_fair(struct rq *rq, struct sched_entity *se, int flags)
 		 */
 		if (cfs_rq_throttled(cfs_rq))
 			break;
-		cfs_rq->h_nr_running++;
+		cfs_rq->h_nr_running += task_delta;
 
 		flags = ENQUEUE_WAKEUP;
 	}
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		cfs_rq->h_nr_running++;
+		cfs_rq->h_nr_running += task_delta;
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -5042,7 +5043,7 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 	if (p->in_iowait)
 		cpufreq_update_util(rq, SCHED_CPUFREQ_IOWAIT);
 
-	throttled = enqueue_entity_fair(rq, &p->se, flags);
+	throttled = enqueue_entity_fair(rq, &p->se, flags, 1);
 
 	if (!throttled)
 		add_nr_running(rq, 1);
@@ -5052,7 +5053,8 @@ enqueue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 
 static void set_next_buddy(struct sched_entity *se);
 
-bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags)
+bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags,
+			 unsigned int task_delta)
 {
 	struct cfs_rq *cfs_rq;
 	int task_sleep = flags & DEQUEUE_SLEEP;
@@ -5069,7 +5071,7 @@ bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags)
 		 */
 		if (cfs_rq_throttled(cfs_rq))
 			break;
-		cfs_rq->h_nr_running--;
+		cfs_rq->h_nr_running -= task_delta;
 
 		/* Don't dequeue parent if it has other entities besides us */
 		if (cfs_rq->load.weight) {
@@ -5088,7 +5090,7 @@ bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags)
 
 	for_each_sched_entity(se) {
 		cfs_rq = cfs_rq_of(se);
-		cfs_rq->h_nr_running--;
+		cfs_rq->h_nr_running -= task_delta;
 
 		if (cfs_rq_throttled(cfs_rq))
 			break;
@@ -5107,7 +5109,7 @@ bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags)
  */
 static void dequeue_task_fair(struct rq *rq, struct task_struct *p, int flags)
 {
-	bool throttled = dequeue_entity_fair(rq, &p->se, flags);
+	bool throttled = dequeue_entity_fair(rq, &p->se, flags, 1);
 
 	if (!throttled)
 		sub_nr_running(rq, 1);
diff --git a/kernel/sched/sched.h b/kernel/sched/sched.h
index 9016049f36c3..569a487ed07c 100644
--- a/kernel/sched/sched.h
+++ b/kernel/sched/sched.h
@@ -1543,8 +1543,10 @@ extern const u32 sched_prio_to_wmult[40];
 
 #define RETRY_TASK ((void *)-1UL)
 
-bool enqueue_entity_fair(struct rq *rq, struct sched_entity *se, int flags);
-bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags);
+bool enqueue_entity_fair(struct rq *rq, struct sched_entity *se, int flags,
+			 unsigned int task_delta);
+bool dequeue_entity_fair(struct rq *rq, struct sched_entity *se, int flags,
+			 unsigned int task_delta);
 
 struct sched_class {
 	const struct sched_class *next;
--
2.9.3.1.gcba166c.dirty