[RFC 2/2] cgroup, kthread: cleanup after sticking kthreads to the root cgroup
From: Roman Gushchin
Date: Thu Oct 12 2017 - 13:38:42 EST
Since kernel threads can no longer be moved out of the root cgroup,
we no longer need the infrastructure added by
commit 77f88796cee8 ("cgroup, kthread: close race window where
new kthreads can be migrated to non-root cgroups") to implement a
partial migration ban: the no_cgroup_migration bit in task_struct
and the cgroup_init_kthreadd()/cgroup_kthread_ready() helpers can
all be removed.

This patch is a pure cleanup; no functional change is intended.
Signed-off-by: Roman Gushchin <guro@xxxxxx>
Cc: Tejun Heo <tj@xxxxxxxxxx>
Cc: Oleg Nesterov <oleg@xxxxxxxxxx>
Cc: Linus Torvalds <torvalds@xxxxxxxxxxxxxxxxxxxx>
Cc: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
Cc: Peter Zijlstra (Intel) <peterz@xxxxxxxxxxxxx>
Cc: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
Cc: Chris Mason <clm@xxxxxx>
Cc: kernel-team@xxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
include/linux/cgroup.h | 21 ---------------------
include/linux/sched.h | 4 ----
kernel/kthread.c | 2 --
3 files changed, 27 deletions(-)
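
Not part of this patch, just context for reviewers: with the previous
patch pinning kernel threads to the root cgroup, the userland
migration path presumably only needs to look at PF_KTHREAD instead of
the per-task no_cgroup_migration bit removed here. A rough sketch,
assuming the check stays in the cgroup_procs write path and keeps
rejecting with -EINVAL (tsk being the task selected for migration;
the real change is in patch 1/2 of this series):

	/* sketch only: userland may not migrate kernel threads */
	if (tsk->flags & PF_KTHREAD)
		return -EINVAL;
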
diff --git a/include/linux/cgroup.h b/include/linux/cgroup.h
index 328a70ce0e23..2b2b838beaed 100644
--- a/include/linux/cgroup.h
+++ b/include/linux/cgroup.h
@@ -626,25 +626,6 @@ static inline void pr_cont_cgroup_path(struct cgroup *cgrp)
 	pr_cont_kernfs_path(cgrp->kn);
 }
 
-static inline void cgroup_init_kthreadd(void)
-{
-	/*
-	 * kthreadd is inherited by all kthreads, keep it in the root so
-	 * that the new kthreads are guaranteed to stay in the root until
-	 * initialization is finished.
-	 */
-	current->no_cgroup_migration = 1;
-}
-
-static inline void cgroup_kthread_ready(void)
-{
-	/*
-	 * This kthread finished initialization. The creator should have
-	 * set PF_NO_SETAFFINITY if this kthread should stay in the root.
-	 */
-	current->no_cgroup_migration = 0;
-}
-
 static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
 {
 	return &cgrp->kn->id;
@@ -672,8 +653,6 @@ static inline void cgroup_free(struct task_struct *p) {}
 
 static inline int cgroup_init_early(void) { return 0; }
 static inline int cgroup_init(void) { return 0; }
-static inline void cgroup_init_kthreadd(void) {}
-static inline void cgroup_kthread_ready(void) {}
 static inline union kernfs_node_id *cgroup_get_kernfs_id(struct cgroup *cgrp)
 {
 	return NULL;
diff --git a/include/linux/sched.h b/include/linux/sched.h
index bdd6ad6fcce1..7ee7bad521ff 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -650,10 +650,6 @@ struct task_struct {
 #ifdef CONFIG_COMPAT_BRK
 	unsigned			brk_randomized:1;
 #endif
-#ifdef CONFIG_CGROUPS
-	/* disallow userland-initiated cgroup migration */
-	unsigned			no_cgroup_migration:1;
-#endif
 
 	unsigned long			atomic_flags; /* Flags requiring atomic access. */
 
diff --git a/kernel/kthread.c b/kernel/kthread.c
index e92070878ce5..0ba173e9b5b6 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -237,7 +237,6 @@ static int kthread(void *_create)
 
 	ret = -EINTR;
 	if (!test_bit(KTHREAD_SHOULD_STOP, &self->flags)) {
-		cgroup_kthread_ready();
 		__kthread_parkme(self);
 		ret = threadfn(data);
 	}
@@ -551,7 +550,6 @@ int kthreadd(void *unused)
 	set_mems_allowed(node_states[N_MEMORY]);
 
 	current->flags |= PF_NOFREEZE;
-	cgroup_init_kthreadd();
 
 	for (;;) {
 		set_current_state(TASK_INTERRUPTIBLE);
--
2.13.6