linux-next: manual merge of the akpm tree with the cgroup tree
From: Stephen Rothwell
Date: Wed Dec 14 2011 - 01:18:34 EST
Hi Andrew,
Today's linux-next merge of the akpm tree got conflicts in
kernel/sched/core.c and kernel/cpuset.c between commit bb9d97b6dffa
("cgroup: don't use subsys->can_attach_task() or ->attach_task()") from
the cgroup tree and commit "cpusets, cgroups: disallow attaching
kthreadd" from the akpm tree.
I fixed it up (maybe - see below) and can carry the fix as necessary.
--
Cheers,
Stephen Rothwell sfr@xxxxxxxxxxxxxxxx
diff --cc kernel/cpuset.c
index e8ed75a,793eca6..0000000
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@@ -1388,34 -1418,11 +1389,36 @@@ static cpumask_var_t cpus_attach
static nodemask_t cpuset_attach_nodemask_from;
static nodemask_t cpuset_attach_nodemask_to;
-/* Set-up work for before attaching each task. */
-static void cpuset_pre_attach(struct cgroup *cont)
+/* Called by cgroups to determine if a cpuset is usable; cgroup_mutex held */
+static int cpuset_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup_taskset *tset)
{
- struct cpuset *cs = cgroup_cs(cont);
+ struct cpuset *cs = cgroup_cs(cgrp);
+ struct task_struct *task;
+ int ret;
+
+ if (cpumask_empty(cs->cpus_allowed) || nodes_empty(cs->mems_allowed))
+ return -ENOSPC;
+
+ cgroup_taskset_for_each(task, cgrp, tset) {
+ /*
+ * Kthreads bound to specific cpus cannot be moved to a new
+ * cpuset; we cannot change their cpu affinity and
+ * isolating such threads by their set of allowed nodes is
+ * unnecessary. Thus, cpusets are not applicable for such
+ * threads. This prevents checking for success of
+ * set_cpus_allowed_ptr() on all attached tasks before
- * cpus_allowed may be changed.
++ * cpus_allowed may be changed. We also disallow attaching
++ * kthreadd, to prevent its child from becoming trapped should
++ * it then acquire PF_THREAD_BOUND.
+ */
- if (task->flags & PF_THREAD_BOUND)
++ if (task->flags & PF_THREAD_BOUND || task == kthreadd_task)
+ return -EINVAL;
+ if ((ret = security_task_setscheduler(task)))
+ return ret;
+ }
+ /* prepare for attach */
if (cs == &top_cpuset)
cpumask_copy(cpus_attach, cpu_possible_mask);
else
diff --cc kernel/sched/core.c
index 9775b8d,e1aa1f9..0000000
--- a/kernel/sched/core.c
+++ b/kernel/sched/core.c
@@@ -7529,21 -7530,26 +7530,30 @@@ cpu_cgroup_destroy(struct cgroup_subsy
sched_destroy_group(tg);
}
-static int
-cpu_cgroup_can_attach_task(struct cgroup *cgrp, struct task_struct *tsk)
+static int cpu_cgroup_can_attach(struct cgroup_subsys *ss, struct cgroup *cgrp,
+ struct cgroup_taskset *tset)
{
- /*
- * kthreadd can fork workers for an RT workqueue in a cgroup
- * which may or may not have rt_runtime allocated. Just say no,
- * as attaching a global resource to a non-root group doesn't
- * make any sense anyway.
- */
- if (tsk == kthreadd_task)
- return -EINVAL;
+ struct task_struct *task;
+
+ cgroup_taskset_for_each(task, cgrp, tset) {
++ /*
++ * kthreadd can fork workers for an RT workqueue in a cgroup
++ * which may or may not have rt_runtime allocated. Just say no,
++ * as attaching a global resource to a non-root group doesn't
++ * make any sense anyway.
++ */
++ if (task == kthreadd_task)
++ return -EINVAL;
+
#ifdef CONFIG_RT_GROUP_SCHED
- if (!sched_rt_can_attach(cgroup_tg(cgrp), tsk))
- return -EINVAL;
+ if (!sched_rt_can_attach(cgroup_tg(cgrp), task))
+ return -EINVAL;
#else
- /* We don't support RT-tasks being in separate groups */
- if (tsk->sched_class != &fair_sched_class)
- return -EINVAL;
+ /* We don't support RT-tasks being in separate groups */
+ if (task->sched_class != &fair_sched_class)
+ return -EINVAL;
#endif
+ }
return 0;
}
Attachment:
pgp00000.pgp
Description: PGP signature