[PATCH 14/16] sched: add bandwidth management for sched_dl.

From: Juri Lelli
Date: Fri Apr 06 2012 - 03:16:12 EST


From: Dario Faggioli <raistlin@xxxxxxxx>

In order for -deadline scheduling to be effective and useful, it is
important to have some method of keeping the allocation of the
available CPU bandwidth to tasks and task groups under control.
This is usually called "admission control", and if it is not performed
at all, no guarantee can be given on the actual scheduling of the
-deadline tasks.

Since RT-throttling was introduced, each task group has had a
bandwidth associated with it, calculated as a certain amount of
runtime over a period. Moreover, to make it possible to manipulate
such bandwidth, readable/writable controls have been added to both
procfs (for system-wide settings) and cgroupfs (for per-group
settings).
The same interface is therefore used for controlling the bandwidth
distribution to -deadline tasks and task groups, i.e., new controls
with similar names, equivalent meaning and the same usage paradigm
are added.

However, more discussion is needed to figure out how we want to
manage SCHED_DEADLINE bandwidth at the task group level. Therefore,
this patch adds a less sophisticated, but still very sensible,
mechanism to ensure that a certain utilization cap is not exceeded
on each root_domain (or on the single rq for !SMP configurations).

Another main difference between deadline bandwidth management and
RT-throttling is that -deadline tasks have bandwidth of their own
(while -rt ones don't!), and thus we don't need a higher-level
throttling mechanism to enforce the desired bandwidth.
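A -deadline task's own bandwidth is simply dl_runtime / dl_period;
for instance, a task asking for 10ms of runtime every 100ms of period
has a bandwidth of 0.1.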

This patch, therefore:
- adds system-wide deadline bandwidth management by means of:
* /proc/sys/kernel/sched_dl_runtime_us,
* /proc/sys/kernel/sched_dl_period_us,
which determine (as runtime / period) the total bandwidth
available on each CPU of each root_domain for -deadline tasks;
- couples the RT and deadline bandwidth management, i.e., enforces
that the sum of the bandwidth devoted to -rt and -deadline tasks
stays below 100%.

This means that, for a root_domain comprising M CPUs, -deadline tasks
can be created until the sum of their bandwidths stays below:

M * (sched_dl_runtime_us / sched_dl_period_us)
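
For example, with the default values introduced below
(sched_dl_runtime_us = 50000, sched_dl_period_us = 1000000, i.e., 5%
per CPU), a root_domain spanning 4 CPUs admits -deadline tasks as long
as the sum of their own bandwidths stays below 4 * 0.05 = 0.20.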

It is also possible to disable this bandwidth management logic, and
thus be free to oversubscribe the system up to any arbitrary level.
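
For reference only (this is not part of the patch, and the names are
illustrative), the admission test that dl_overflow()/__dl_overflow()
below perform on each root_domain boils down to the following
stand-alone sketch, using the same << 20 fixed-point scaling as
to_ratio():

#include <stdbool.h>
#include <stdint.h>

/* (runtime / period) scaled by 2^20, as done by to_ratio(). */
static uint64_t bw_ratio(uint64_t period, uint64_t runtime)
{
	if (period == 0)
		return 0;
	return (runtime << 20) / period;
}

/*
 * A task asking for new_bw = bw_ratio(period, runtime) is admitted on
 * a root_domain of 'cpus' CPUs iff the bandwidth already allocated
 * there (total_bw), minus whatever the task may already hold (old_bw),
 * plus new_bw does not exceed cpus * dl_bw, where dl_bw is derived
 * from the sched_dl_runtime_us / sched_dl_period_us sysctls.
 */
static bool dl_admit(uint64_t dl_bw, uint64_t total_bw, int cpus,
		     uint64_t old_bw, uint64_t new_bw)
{
	return total_bw - old_bw + new_bw <= dl_bw * cpus;
}

Writing a negative value (e.g., -1) to sched_dl_runtime_us is what
disables the admission test, mirroring sched_rt_runtime_us for -rt
throttling.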

Signed-off-by: Dario Faggioli <raistlin@xxxxxxxx>
Signed-off-by: Juri Lelli <juri.lelli@xxxxxxxxx>
---
include/linux/sched.h | 8 +
kernel/sched.c | 497 ++++++++++++++++++++++++++++++++++++++++++++++---
kernel/sched_dl.c | 18 ++-
kernel/sysctl.c | 14 ++
4 files changed, 512 insertions(+), 25 deletions(-)

diff --git a/include/linux/sched.h b/include/linux/sched.h
index ca45db4..534f099 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1289,6 +1289,7 @@ struct sched_dl_entity {
u64 dl_runtime; /* maximum runtime for each instance */
u64 dl_deadline; /* relative deadline of each instance */
u64 dl_period; /* separation of two instances (period) */
+ u64 dl_bw; /* dl_runtime / dl_period */

/*
* Actual scheduling parameters. Initialized with the values above,
@@ -2217,6 +2218,13 @@ int sched_rt_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos);

+extern unsigned int sysctl_sched_dl_period;
+extern int sysctl_sched_dl_runtime;
+
+int sched_dl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos);
+
#ifdef CONFIG_SCHED_AUTOGROUP
extern unsigned int sysctl_sched_autogroup_enabled;

diff --git a/kernel/sched.c b/kernel/sched.c
index 3963e4e..d5403c8 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -158,6 +158,22 @@ struct rt_prio_array {
struct list_head queue[MAX_RT_PRIO];
};

+static unsigned long to_ratio(u64 period, u64 runtime)
+{
+ if (runtime == RUNTIME_INF)
+ return 1ULL << 20;
+
+ /*
+ * Doing this here saves a lot of checks in all
+ * the calling paths, and returning zero seems
+ * safe for them anyway.
+ */
+ if (period == 0)
+ return 0;
+
+ return div64_u64(runtime << 20, period);
+}
+
struct rt_bandwidth {
/* nests inside the rq lock: */
raw_spinlock_t rt_runtime_lock;
@@ -251,6 +267,74 @@ static void destroy_rt_bandwidth(struct rt_bandwidth *rt_b)
#endif

/*
+ * To keep the bandwidth of -deadline tasks and groups under control
+ * we need some place where we can:
+ * - store the maximum -deadline bandwidth of the system (the group);
+ * - cache the fraction of that bandwidth that is currently allocated.
+ *
+ * This is all done in the data structure below. It is similar to the
+ * one used for RT-throttling (rt_bandwidth), with the main difference
+ * that, since here we are only interested in admission control, we
+ * do not decrease any runtime while the group "executes", nor do we
+ * need a timer to replenish it.
+ *
+ * With respect to SMP, the bandwidth is given on a per-CPU basis,
+ * meaning that:
+ * - dl_bw (< 100%) is the bandwidth of the system (group) on each CPU;
+ * - dl_total_bw array contains, in the i-th element, the currently
+ * allocated bandwidth on the i-th CPU.
+ * Moreover, groups consume bandwidth on each CPU, while tasks only
+ * consume bandwidth on the CPU they're running on.
+ * Finally, dl_total_bw_cpu is used to cache the index of dl_total_bw
+ * that will be shown the next time the proc or cgroup controls are
+ * read. It can, in turn, be changed by writing to its own control.
+ */
+struct dl_bandwidth {
+ raw_spinlock_t dl_runtime_lock;
+ u64 dl_runtime;
+ u64 dl_period;
+};
+
+static struct dl_bandwidth def_dl_bandwidth;
+
+static
+void init_dl_bandwidth(struct dl_bandwidth *dl_b, u64 period, u64 runtime)
+{
+ raw_spin_lock_init(&dl_b->dl_runtime_lock);
+ dl_b->dl_period = period;
+ dl_b->dl_runtime = runtime;
+}
+
+static inline int dl_bandwidth_enabled(void)
+{
+ return sysctl_sched_dl_runtime >= 0;
+}
+
+/*
+ * Per root_domain (per rq on !SMP) -deadline bandwidth accounting:
+ * bw is the admissible bandwidth on each CPU of the domain, total_bw
+ * the bandwidth currently allocated to its -deadline tasks.
+ */
+struct dl_bw {
+ raw_spinlock_t lock;
+ u64 bw, total_bw;
+};
+
+static inline u64 global_dl_period(void);
+static inline u64 global_dl_runtime(void);
+
+static void init_dl_bw(struct dl_bw *dl_b)
+{
+ raw_spin_lock_init(&dl_b->lock);
+ raw_spin_lock(&def_dl_bandwidth.dl_runtime_lock);
+ if (global_dl_runtime() == RUNTIME_INF)
+ dl_b->bw = -1;
+ else
+ dl_b->bw = to_ratio(global_dl_period(), global_dl_runtime());
+ raw_spin_unlock(&def_dl_bandwidth.dl_runtime_lock);
+ dl_b->total_bw = 0;
+}
+
+/*
* sched_domains_mutex serializes calls to init_sched_domains,
* detach_destroy_domains and partition_sched_domains.
*/
@@ -594,6 +678,8 @@ struct dl_rq {
*/
struct rb_root pushable_dl_tasks_root;
struct rb_node *pushable_dl_tasks_leftmost;
+#else
+ struct dl_bw dl_bw;
#endif
};

@@ -620,6 +706,7 @@ struct root_domain {
*/
cpumask_var_t dlo_mask;
atomic_t dlo_count;
+ struct dl_bw dl_bw;

/*
* The "RT overload" flag: it gets set if a CPU has more than
@@ -1039,6 +1126,28 @@ static inline u64 global_rt_runtime(void)
return (u64)sysctl_sched_rt_runtime * NSEC_PER_USEC;
}

+/*
+ * Maximum bandwidth available for all -deadline tasks and groups
+ * (if group scheduling is configured) on each CPU.
+ *
+ * default: 5%
+ */
+unsigned int sysctl_sched_dl_period = 1000000;
+int sysctl_sched_dl_runtime = 50000;
+
+static inline u64 global_dl_period(void)
+{
+ return (u64)sysctl_sched_dl_period * NSEC_PER_USEC;
+}
+
+static inline u64 global_dl_runtime(void)
+{
+ if (sysctl_sched_dl_runtime < 0)
+ return RUNTIME_INF;
+
+ return (u64)sysctl_sched_dl_runtime * NSEC_PER_USEC;
+}
+
#ifndef prepare_arch_switch
# define prepare_arch_switch(next) do { } while (0)
#endif
@@ -3121,6 +3230,78 @@ int sched_fork(struct task_struct *p)
return 0;
}

+static inline
+void __dl_clear(struct dl_bw *dl_b, u64 tsk_bw)
+{
+ dl_b->total_bw -= tsk_bw;
+}
+
+static inline
+void __dl_add(struct dl_bw *dl_b, u64 tsk_bw)
+{
+ dl_b->total_bw += tsk_bw;
+}
+
+static inline
+bool __dl_overflow(struct dl_bw *dl_b, int cpus, u64 old_bw, u64 new_bw)
+{
+ return dl_b->bw != -1 &&
+ dl_b->bw * cpus < dl_b->total_bw - old_bw + new_bw;
+}
+
+/*
+ * We must be sure that accepting a new task (or allowing changing the
+ * parameters of an existing one) is consistent with the bandwidth
+ * constraints. If yes, this function also accordingly updates the currently
+ * allocated bandwidth to reflect the new situation.
+ *
+ * This function is called while holding p's rq->lock.
+ */
+static int dl_overflow(struct task_struct *p, int policy,
+ const struct sched_param2 *param2)
+{
+#ifdef CONFIG_SMP
+ struct dl_bw *dl_b = &task_rq(p)->rd->dl_bw;
+#else
+ struct dl_bw *dl_b = &task_rq(p)->dl.dl_bw;
+#endif
+ u64 period = param2->sched_period;
+ u64 runtime = param2->sched_runtime;
+ u64 new_bw = dl_policy(policy) ? to_ratio(period, runtime) : 0;
+#ifdef CONFIG_SMP
+ int cpus = cpumask_weight(task_rq(p)->rd->span);
+#else
+ int cpus = 1;
+#endif
+ int err = -1;
+
+ if (new_bw == p->dl.dl_bw)
+ return 0;
+
+ /*
+ * Whether a task enters, leaves, or stays -deadline but changes its
+ * parameters, we may need to update the total allocated bandwidth of
+ * the container accordingly.
+ */
+ raw_spin_lock(&dl_b->lock);
+ if (dl_policy(policy) && !task_has_dl_policy(p) &&
+ !__dl_overflow(dl_b, cpus, 0, new_bw)) {
+ __dl_add(dl_b, new_bw);
+ err = 0;
+ } else if (dl_policy(policy) && task_has_dl_policy(p) &&
+ !__dl_overflow(dl_b, cpus, p->dl.dl_bw, new_bw)) {
+ __dl_clear(dl_b, p->dl.dl_bw);
+ __dl_add(dl_b, new_bw);
+ err = 0;
+ } else if (!dl_policy(policy) && task_has_dl_policy(p)) {
+ __dl_clear(dl_b, p->dl.dl_bw);
+ err = 0;
+ }
+ raw_spin_unlock(&dl_b->lock);
+
+ return err;
+}
+
/*
* wake_up_new_task - wake up a newly created task for the first time.
*
@@ -5521,6 +5702,7 @@ __setparam_dl(struct task_struct *p, const struct sched_param2 *param2)
dl_se->dl_period = param2->sched_period;
else
dl_se->dl_period = dl_se->dl_deadline;
+ dl_se->dl_bw = to_ratio(dl_se->dl_period, dl_se->dl_runtime);
dl_se->flags = param2->sched_flags;
dl_se->dl_throttled = 0;
dl_se->dl_new = 1;
@@ -5708,8 +5890,8 @@ recheck:
return 0;
}

-#ifdef CONFIG_RT_GROUP_SCHED
if (user) {
+#ifdef CONFIG_RT_GROUP_SCHED
/*
* Do not allow realtime tasks into groups that have no runtime
* assigned.
@@ -5720,8 +5902,25 @@ recheck:
task_rq_unlock(rq, p, &flags);
return -EPERM;
}
- }
#endif
+#ifdef CONFIG_SMP
+ if (dl_bandwidth_enabled() && dl_policy(policy)) {
+ const struct cpumask *span = rq->rd->span;
+
+ /*
+ * Don't allow tasks with an affinity mask smaller than
+ * the entire root_domain to become SCHED_DEADLINE. We
+ * will also fail if there's no bandwidth available.
+ */
+ if (!cpumask_equal(&p->cpus_allowed, span) ||
+ rq->rd->dl_bw.bw == 0) {
+ __task_rq_unlock(rq);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ return -EPERM;
+ }
+ }
+#endif
+ }

/* recheck policy now with rq lock held */
if (unlikely(oldpolicy != -1 && oldpolicy != p->policy)) {
@@ -5729,6 +5928,19 @@ recheck:
task_rq_unlock(rq, p, &flags);
goto recheck;
}
+
+ /*
+ * If setscheduling to SCHED_DEADLINE (or changing the parameters
+ * of a SCHED_DEADLINE task) we need to check if enough bandwidth
+ * is available.
+ */
+ if ((dl_policy(policy) || dl_task(p)) &&
+ dl_overflow(p, policy, param)) {
+ __task_rq_unlock(rq);
+ raw_spin_unlock_irqrestore(&p->pi_lock, flags);
+ return -EBUSY;
+ }
+
on_rq = p->on_rq;
running = task_current(rq, p);
if (on_rq)
@@ -6048,6 +6260,24 @@ long sched_setaffinity(pid_t pid, const struct cpumask *in_mask)
if (retval)
goto out_unlock;

+ /*
+ * Since bandwidth control happens on a root_domain basis,
+ * if admission test is enabled, we only admit -deadline
+ * tasks allowed to run on all the CPUs in the task's
+ * root_domain.
+ */
+#ifdef CONFIG_SMP
+ if (task_has_dl_policy(p)) {
+ const struct cpumask *span = task_rq(p)->rd->span;
+
+ if (dl_bandwidth_enabled() &&
+ !cpumask_equal(in_mask, span)) {
+ retval = -EBUSY;
+ goto out_unlock;
+ }
+ }
+#endif
+
cpuset_cpus_allowed(p, cpus_allowed);
cpumask_and(new_mask, in_mask, cpus_allowed);
again:
@@ -6728,6 +6958,42 @@ out:
EXPORT_SYMBOL_GPL(set_cpus_allowed_ptr);

/*
+ * When dealing with a -deadline task, we have to check if moving it to
+ * a new CPU is possible or not. In fact, this is possible only if
+ * there is enough bandwidth available on that CPU; otherwise, we want
+ * the whole migration procedure to fail.
+ */
+static inline
+bool set_task_cpu_dl(struct task_struct *p, unsigned int cpu)
+{
+ struct dl_bw *dl_b = &task_rq(p)->rd->dl_bw;
+ struct dl_bw *cpu_b = &cpu_rq(cpu)->rd->dl_bw;
+ int ret = 1;
+ u64 bw;
+
+ if (dl_b == cpu_b)
+ return 1;
+
+ raw_spin_lock(&dl_b->lock);
+ raw_spin_lock(&cpu_b->lock);
+
+ bw = cpu_b->bw * cpumask_weight(cpu_rq(cpu)->rd->span);
+ if (dl_bandwidth_enabled() &&
+ bw < cpu_b->total_bw + p->dl.dl_bw) {
+ ret = 0;
+ goto unlock;
+ }
+ dl_b->total_bw -= p->dl.dl_bw;
+ cpu_b->total_bw += p->dl.dl_bw;
+
+unlock:
+ raw_spin_unlock(&cpu_b->lock);
+ raw_spin_unlock(&dl_b->lock);
+
+ return ret;
+}
+
+/*
* Move (not current) task off this cpu, onto dest cpu. We're doing
* this because either it can't run here any more (set_cpus_allowed()
* away from this CPU, or CPU going down), or because we're
@@ -6759,6 +7025,13 @@ static int __migrate_task(struct task_struct *p, int src_cpu, int dest_cpu)
goto fail;

/*
+ * If p is -deadline, proceed only if there is enough
+ * bandwidth available on dest_cpu
+ */
+ if (unlikely(dl_task(p)) && !set_task_cpu_dl(p, dest_cpu))
+ goto fail;
+
+ /*
* If we're not on a rq, the next wake-up will ensure we're
* placed properly.
*/
@@ -7467,6 +7740,8 @@ static int init_rootdomain(struct root_domain *rd)
if (!alloc_cpumask_var(&rd->rto_mask, GFP_KERNEL))
goto free_dlo_mask;

+ init_dl_bw(&rd->dl_bw);
+
if (cpupri_init(&rd->cpupri) != 0)
goto free_rto_mask;
return 0;
@@ -8645,6 +8920,8 @@ static void init_dl_rq(struct dl_rq *dl_rq, struct rq *rq)
dl_rq->dl_nr_migratory = 0;
dl_rq->overloaded = 0;
dl_rq->pushable_dl_tasks_root = RB_ROOT;
+#else
+ init_dl_bw(&dl_rq->dl_bw);
#endif
}

@@ -8757,6 +9034,8 @@ void __init sched_init(void)

init_rt_bandwidth(&def_rt_bandwidth,
global_rt_period(), global_rt_runtime());
+ init_dl_bandwidth(&def_dl_bandwidth,
+ global_dl_period(), global_dl_runtime());

#ifdef CONFIG_RT_GROUP_SCHED
init_rt_bandwidth(&root_task_group.rt_bandwidth,
@@ -9345,16 +9624,6 @@ unsigned long sched_group_shares(struct task_group *tg)
}
#endif

-#if defined(CONFIG_RT_GROUP_SCHED) || defined(CONFIG_CFS_BANDWIDTH)
-static unsigned long to_ratio(u64 period, u64 runtime)
-{
- if (runtime == RUNTIME_INF)
- return 1ULL << 20;
-
- return div64_u64(runtime << 20, period);
-}
-#endif
-
#ifdef CONFIG_RT_GROUP_SCHED
/*
* Ensure that the real time constraints are schedulable.
@@ -9528,10 +9797,48 @@ long sched_group_rt_period(struct task_group *tg)
do_div(rt_period_us, NSEC_PER_USEC);
return rt_period_us;
}
+#endif /* CONFIG_RT_GROUP_SCHED */

+/*
+ * Coupling of -rt and -deadline bandwidth.
+ *
+ * Here we check if the new -rt bandwidth value is consistent
+ * with the system settings for the bandwidth available
+ * to -deadline tasks.
+ *
+ * IOW, we want to enforce that
+ *
+ * rt_bandwidth + dl_bandwidth <= 100%
+ *
+ * is always true.
+ */
+static bool __sched_rt_dl_global_constraints(u64 rt_bw)
+{
+ unsigned long flags;
+ u64 dl_bw;
+ bool ret;
+
+ raw_spin_lock_irqsave(&def_dl_bandwidth.dl_runtime_lock, flags);
+ if (global_rt_runtime() == RUNTIME_INF ||
+ global_dl_runtime() == RUNTIME_INF) {
+ ret = true;
+ goto unlock;
+ }
+
+ dl_bw = to_ratio(def_dl_bandwidth.dl_period,
+ def_dl_bandwidth.dl_runtime);
+
+ ret = rt_bw + dl_bw <= to_ratio(RUNTIME_INF, RUNTIME_INF);
+unlock:
+ raw_spin_unlock_irqrestore(&def_dl_bandwidth.dl_runtime_lock, flags);
+
+ return ret;
+}
+
+#ifdef CONFIG_RT_GROUP_SCHED
static int sched_rt_global_constraints(void)
{
- u64 runtime, period;
+ u64 runtime, period, bw;
int ret = 0;

if (sysctl_sched_rt_period <= 0)
@@ -9546,6 +9853,10 @@ static int sched_rt_global_constraints(void)
if (runtime > period && runtime != RUNTIME_INF)
return -EINVAL;

+ bw = to_ratio(period, runtime);
+ if (!__sched_rt_dl_global_constraints(bw))
+ return -EINVAL;
+
mutex_lock(&rt_constraints_mutex);
read_lock(&tasklist_lock);
ret = __rt_schedulable(NULL, 0, 0);
@@ -9568,19 +9879,19 @@ int sched_rt_can_attach(struct task_group *tg, struct task_struct *tsk)
static int sched_rt_global_constraints(void)
{
unsigned long flags;
- int i;
+ int i, ret = 0;
+ u64 bw;

if (sysctl_sched_rt_period <= 0)
return -EINVAL;

- /*
- * There's always some RT tasks in the root group
- * -- migration, kstopmachine etc..
- */
- if (sysctl_sched_rt_runtime == 0)
- return -EBUSY;
-
raw_spin_lock_irqsave(&def_rt_bandwidth.rt_runtime_lock, flags);
+ bw = to_ratio(global_rt_period(), global_rt_runtime());
+ if (!__sched_rt_dl_global_constraints(bw)) {
+ ret = -EINVAL;
+ goto unlock;
+ }
+
for_each_possible_cpu(i) {
struct rt_rq *rt_rq = &cpu_rq(i)->rt;

@@ -9588,12 +9899,96 @@ static int sched_rt_global_constraints(void)
rt_rq->rt_runtime = global_rt_runtime();
raw_spin_unlock(&rt_rq->rt_runtime_lock);
}
+unlock:
raw_spin_unlock_irqrestore(&def_rt_bandwidth.rt_runtime_lock, flags);

- return 0;
+ return ret;
}
#endif /* CONFIG_RT_GROUP_SCHED */

+/*
+ * Coupling of -dl and -rt bandwidth.
+ *
+ * Here we check, while setting the system wide bandwidth available
+ * for -dl tasks and groups, if the new values are consistent with
+ * the system settings for the bandwidth available to -rt entities.
+ *
+ * IOW, we want to enforce that
+ *
+ * rt_bandwidth + dl_bandwidth <= 100%
+ *
+ * is always true.
+ */
+static bool __sched_dl_rt_global_constraints(u64 dl_bw)
+{
+ u64 rt_bw;
+ bool ret;
+
+ raw_spin_lock(&def_rt_bandwidth.rt_runtime_lock);
+ if (global_dl_runtime() == RUNTIME_INF ||
+ global_rt_runtime() == RUNTIME_INF) {
+ ret = true;
+ goto unlock;
+ }
+
+ rt_bw = to_ratio(ktime_to_ns(def_rt_bandwidth.rt_period),
+ def_rt_bandwidth.rt_runtime);
+
+ ret = rt_bw + dl_bw <= to_ratio(RUNTIME_INF, RUNTIME_INF);
+unlock:
+ raw_spin_unlock(&def_rt_bandwidth.rt_runtime_lock);
+
+ return ret;
+}
+
+static int __sched_dl_global_constraints(u64 runtime, u64 period)
+{
+ if (!period || (runtime != RUNTIME_INF && runtime > period))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int sched_dl_global_constraints(void)
+{
+ u64 runtime = global_dl_runtime();
+ u64 period = global_dl_period();
+ u64 new_bw = to_ratio(period, runtime);
+ int ret, i;
+
+ ret = __sched_dl_global_constraints(runtime, period);
+ if (ret)
+ return ret;
+
+ if (!__sched_dl_rt_global_constraints(new_bw))
+ return -EINVAL;
+
+ /*
+ * Here we want to check the bandwidth not being set to some
+ * value smaller than the currently allocated bandwidth in
+ * any of the root_domains.
+ *
+ * FIXME: Cycling on all the CPUs is overkill, but simpler than
+ * cycling on root_domains... Discussion on different/better
+ * solutions is welcome!
+ */
+ for_each_possible_cpu(i) {
+#ifdef CONFIG_SMP
+ struct dl_bw *dl_b = &cpu_rq(i)->rd->dl_bw;
+#else
+ struct dl_bw *dl_b = &cpu_rq(i)->dl.dl_bw;
+#endif
+ raw_spin_lock(&dl_b->lock);
+ if (new_bw < dl_b->total_bw) {
+ raw_spin_unlock(&dl_b->lock);
+ return -EBUSY;
+ }
+ raw_spin_unlock(&dl_b->lock);
+ }
+
+ return 0;
+}
+
int sched_rt_handler(struct ctl_table *table, int write,
void __user *buffer, size_t *lenp,
loff_t *ppos)
@@ -9624,6 +10019,64 @@ int sched_rt_handler(struct ctl_table *table, int write,
return ret;
}

+int sched_dl_handler(struct ctl_table *table, int write,
+ void __user *buffer, size_t *lenp,
+ loff_t *ppos)
+{
+ int ret;
+ int old_period, old_runtime;
+ static DEFINE_MUTEX(mutex);
+ unsigned long flags;
+
+ mutex_lock(&mutex);
+ old_period = sysctl_sched_dl_period;
+ old_runtime = sysctl_sched_dl_runtime;
+
+ ret = proc_dointvec(table, write, buffer, lenp, ppos);
+
+ if (!ret && write) {
+ raw_spin_lock_irqsave(&def_dl_bandwidth.dl_runtime_lock,
+ flags);
+
+ ret = sched_dl_global_constraints();
+ if (ret) {
+ sysctl_sched_dl_period = old_period;
+ sysctl_sched_dl_runtime = old_runtime;
+ } else {
+ u64 new_bw;
+ int i;
+
+ def_dl_bandwidth.dl_period = global_dl_period();
+ def_dl_bandwidth.dl_runtime = global_dl_runtime();
+ if (global_dl_runtime() == RUNTIME_INF)
+ new_bw = -1;
+ else
+ new_bw = to_ratio(global_dl_period(),
+ global_dl_runtime());
+ /*
+ * FIXME: As above...
+ */
+ for_each_possible_cpu(i) {
+#ifdef CONFIG_SMP
+ struct dl_bw *dl_b = &cpu_rq(i)->rd->dl_bw;
+#else
+ struct dl_bw *dl_b = &cpu_rq(i)->dl.dl_bw;
+#endif
+
+ raw_spin_lock(&dl_b->lock);
+ dl_b->bw = new_bw;
+ raw_spin_unlock(&dl_b->lock);
+ }
+ }
+
+ raw_spin_unlock_irqrestore(&def_dl_bandwidth.dl_runtime_lock,
+ flags);
+ }
+ mutex_unlock(&mutex);
+
+ return ret;
+}
+
#ifdef CONFIG_CGROUP_SCHED

/* return corresponding task_group object of a cgroup */
diff --git a/kernel/sched_dl.c b/kernel/sched_dl.c
index e393292..fee9434 100644
--- a/kernel/sched_dl.c
+++ b/kernel/sched_dl.c
@@ -991,6 +991,18 @@ static void task_fork_dl(struct task_struct *p)
static void task_dead_dl(struct task_struct *p)
{
struct hrtimer *timer = &p->dl.dl_timer;
+#ifdef CONFIG_SMP
+ struct dl_bw *dl_b = &task_rq(p)->rd->dl_bw;
+#else
+ struct dl_bw *dl_b = &task_rq(p)->dl.dl_bw;
+#endif
+
+ /*
+ * Since we are TASK_DEAD we won't slip out of the domain!
+ */
+ raw_spin_lock_irq(&dl_b->lock);
+ dl_b->total_bw -= p->dl.dl_bw;
+ raw_spin_unlock_irq(&dl_b->lock);

hrtimer_try_to_cancel(timer);
}
@@ -1171,7 +1183,7 @@ static struct rq *find_lock_later_rq(struct task_struct *task, struct rq *rq)
!cpumask_test_cpu(later_rq->cpu,
&task->cpus_allowed) ||
task_running(rq, task) ||
- !task->se.on_rq)) {
+ !task->on_rq)) {
raw_spin_unlock(&later_rq->lock);
later_rq = NULL;
break;
@@ -1210,7 +1222,7 @@ static struct task_struct *pick_next_pushable_dl_task(struct rq *rq)
BUG_ON(task_current(rq, p));
BUG_ON(p->dl.nr_cpus_allowed <= 1);

- BUG_ON(!p->se.on_rq);
+ BUG_ON(!p->on_rq);
BUG_ON(!dl_task(p));

return p;
@@ -1351,7 +1363,7 @@ static int pull_dl_task(struct rq *this_rq)
dl_time_before(p->dl.deadline,
this_rq->dl.earliest_dl.curr))) {
WARN_ON(p == src_rq->curr);
- WARN_ON(!p->se.on_rq);
+ WARN_ON(!p->on_rq);

/*
* Then we pull iff p has actually an earlier
diff --git a/kernel/sysctl.c b/kernel/sysctl.c
index ae27196..9ab6c9d 100644
--- a/kernel/sysctl.c
+++ b/kernel/sysctl.c
@@ -362,6 +362,20 @@ static struct ctl_table kern_table[] = {
.mode = 0644,
.proc_handler = sched_rt_handler,
},
+ {
+ .procname = "sched_dl_period_us",
+ .data = &sysctl_sched_dl_period,
+ .maxlen = sizeof(unsigned int),
+ .mode = 0644,
+ .proc_handler = sched_dl_handler,
+ },
+ {
+ .procname = "sched_dl_runtime_us",
+ .data = &sysctl_sched_dl_runtime,
+ .maxlen = sizeof(int),
+ .mode = 0644,
+ .proc_handler = sched_dl_handler,
+ },
#ifdef CONFIG_SCHED_AUTOGROUP
{
.procname = "sched_autogroup_enabled",
--
1.7.5.4
