[PATCH RFC 1/3] sched/fair: call __refill_cfs_bandwidth_runtime only for finite quota

From: Konstantin Khlebnikov
Date: Mon May 16 2016 - 05:36:42 EST


Both call sites (tg_set_cfs_bandwidth and do_sched_cfs_period_timer) already
check for infinite quota, so the RUNTIME_INF test inside
__refill_cfs_bandwidth_runtime() is redundant. Move the refill in
tg_set_cfs_bandwidth() under the existing runtime_enabled check and drop the
test from __refill_cfs_bandwidth_runtime().
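
For illustration only, outside the patch itself: a minimal userspace sketch of
the calling pattern that makes the in-function check redundant. The struct,
the RUNTIME_INF value and the helpers refill_runtime(), set_bandwidth() and
period_timer() are simplified stand-ins, not kernel code.

#include <stdint.h>
#include <stdio.h>

typedef uint64_t u64;

#define RUNTIME_INF ((u64)~0ULL)

/* Toy stand-in for struct cfs_bandwidth: only the fields the sketch needs. */
struct cfs_bandwidth {
        u64 quota;
        u64 runtime;
};

/* Like the patched refill: no RUNTIME_INF check here, callers are expected
 * to refill only when quota is finite. */
static void refill_runtime(struct cfs_bandwidth *cfs_b)
{
        cfs_b->runtime = cfs_b->quota;
}

/* Roughly models tg_set_cfs_bandwidth(): refill only if runtime is enabled. */
static void set_bandwidth(struct cfs_bandwidth *cfs_b, u64 quota)
{
        int runtime_enabled = quota != RUNTIME_INF;

        cfs_b->quota = quota;
        if (runtime_enabled)
                refill_runtime(cfs_b);
}

/* Roughly models do_sched_cfs_period_timer(): bail out on infinite quota. */
static void period_timer(struct cfs_bandwidth *cfs_b)
{
        if (cfs_b->quota == RUNTIME_INF)
                return;
        refill_runtime(cfs_b);
}

int main(void)
{
        struct cfs_bandwidth cfs_b = { .quota = RUNTIME_INF, .runtime = 0 };

        set_bandwidth(&cfs_b, 100000);          /* finite quota: refilled */
        period_timer(&cfs_b);                   /* finite quota: refilled again */
        set_bandwidth(&cfs_b, RUNTIME_INF);     /* infinite quota: refill skipped */

        printf("runtime = %llu\n", (unsigned long long)cfs_b.runtime);
        return 0;
}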

Signed-off-by: Konstantin Khlebnikov <khlebnikov@xxxxxxxxxxxxxx>
---
kernel/sched/core.c | 5 +++--
kernel/sched/fair.c | 6 +-----
2 files changed, 4 insertions(+), 7 deletions(-)

diff --git a/kernel/sched/core.c b/kernel/sched/core.c
index d1f7149f8704..355698188ea9 100644
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@ -8196,10 +8196,11 @@ static int tg_set_cfs_bandwidth(struct task_group *tg, u64 period, u64 quota)
         cfs_b->period = ns_to_ktime(period);
         cfs_b->quota = quota;

-        __refill_cfs_bandwidth_runtime(cfs_b);
         /* restart the period timer (if active) to handle new period expiry */
-        if (runtime_enabled)
+        if (runtime_enabled) {
+                __refill_cfs_bandwidth_runtime(cfs_b);
                 start_cfs_bandwidth(cfs_b);
+        }
         raw_spin_unlock_irq(&cfs_b->lock);

         for_each_online_cpu(i) {
diff --git a/kernel/sched/fair.c b/kernel/sched/fair.c
index e7dd0ec169be..d26b631a9a1d 100644
--- a/kernel/sched/fair.c
+++ b/kernel/sched/fair.c
@@ -3559,12 +3559,8 @@ static inline u64 sched_cfs_bandwidth_slice(void)
  */
 void __refill_cfs_bandwidth_runtime(struct cfs_bandwidth *cfs_b)
 {
-        u64 now;
+        u64 now = sched_clock_cpu(smp_processor_id());

-        if (cfs_b->quota == RUNTIME_INF)
-                return;
-
-        now = sched_clock_cpu(smp_processor_id());
         cfs_b->runtime = cfs_b->quota;
         cfs_b->runtime_expires = now + ktime_to_ns(cfs_b->period);
 }