From 9693b69bb8897580752265ad34df03343ef78e4d Mon Sep 17 00:00:00 2001
From: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
Date: Wed, 27 Apr 2011 13:51:08 +0900
Subject: [PATCH 2/2] change task->cpus_allowed to pointer

Signed-off-by: KOSAKI Motohiro <kosaki.motohiro@jp.fujitsu.com>
---
 arch/x86/kernel/init_task.c |    2 ++
 include/linux/cpuset.h      |    3 ++-
 include/linux/init_task.h   |    4 ++--
 include/linux/sched.h       |   15 +++++++++++----
 kernel/cpuset.c             |    9 ++++++---
 kernel/fork.c               |   19 +++++++++++++++++++
 kernel/kthread.c            |    3 ++-
 kernel/sched.c              |   12 ++++++++++--
 kernel/sched_rt.c           |    2 +-
 9 files changed, 55 insertions(+), 14 deletions(-)

diff --git a/arch/x86/kernel/init_task.c b/arch/x86/kernel/init_task.c
index 43e9ccf..4715d82 100644
--- a/arch/x86/kernel/init_task.c
+++ b/arch/x86/kernel/init_task.c
@@ -23,6 +23,8 @@ static struct sighand_struct init_sighand = INIT_SIGHAND(init_sighand);
 union thread_union init_thread_union __init_task_data =
 	{ INIT_THREAD_INFO(init_task) };
 
+struct cpumask init_cpus_allowed = CPU_MASK_ALL;
+
 /*
  * Initial task structure.
  *
diff --git a/include/linux/cpuset.h b/include/linux/cpuset.h
index 684fe71..c20a45d 100644
--- a/include/linux/cpuset.h
+++ b/include/linux/cpuset.h
@@ -146,7 +146,8 @@ static inline void cpuset_cpus_allowed(struct task_struct *p,
 
 static inline int cpuset_cpus_allowed_fallback(struct task_struct *p)
 {
-	cpumask_copy(tsk_cpus_allowed(p), cpu_possible_mask);
+	cpumask_copy(p->cpus_allowed_ptr, cpu_possible_mask);
+	p->flags |= PF_THREAD_UNBOUND;
 	return cpumask_any(cpu_active_mask);
 }
 
diff --git a/include/linux/init_task.h b/include/linux/init_task.h
index 10bdf82..4142dda 100644
--- a/include/linux/init_task.h
+++ b/include/linux/init_task.h
@@ -135,13 +135,13 @@ extern struct cred init_cred;
 	.state		= 0,						\
 	.stack		= &init_thread_info,				\
 	.usage		= ATOMIC_INIT(2),				\
-	.flags		= PF_KTHREAD,					\
+	.flags		= PF_KTHREAD | PF_THREAD_UNBOUND,		\
 	.lock_depth	= -1,						\
 	.prio		= MAX_PRIO-20,					\
 	.static_prio	= MAX_PRIO-20,					\
 	.normal_prio	= MAX_PRIO-20,					\
 	.policy		= SCHED_NORMAL,					\
-	.cpus_allowed	= CPU_MASK_ALL,					\
+	.cpus_allowed_ptr = &init_cpus_allowed,				\
 	.mm		= NULL,						\
 	.active_mm	= &init_mm,					\
 	.se		= {						\
diff --git a/include/linux/sched.h b/include/linux/sched.h
index 3f7d3f9..716b24a 100644
--- a/include/linux/sched.h
+++ b/include/linux/sched.h
@@ -1233,7 +1233,7 @@ struct task_struct {
 #endif
 
 	unsigned int policy;
-	cpumask_t cpus_allowed;
+	struct cpumask *cpus_allowed_ptr;
 
 #ifdef CONFIG_PREEMPT_RCU
 	int rcu_read_lock_nesting;
@@ -1544,9 +1544,6 @@ struct task_struct {
 #endif
 };
 
-/* Future-safe accessor for struct task_struct's cpus_allowed. */
-#define tsk_cpus_allowed(tsk) (&(tsk)->cpus_allowed)
-
 /*
  * Priority of a process goes from 0..MAX_PRIO-1, valid RT
  * priority is 0..MAX_RT_PRIO-1, and SCHED_NORMAL/SCHED_BATCH
@@ -1729,6 +1726,7 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 /*
  * Per process flags
  */
+#define PF_THREAD_UNBOUND	0x00000001
 #define PF_STARTING	0x00000002 /* being created */
 #define PF_EXITING	0x00000004 /* getting shut down */
 #define PF_EXITPIDONE	0x00000008 /* pi exit done on shut down */
@@ -1759,6 +1757,15 @@ extern void thread_group_times(struct task_struct *p, cputime_t *ut, cputime_t *st);
 #define PF_FREEZER_SKIP	0x40000000 /* Freezer should not count it as freezable */
 #define PF_FREEZER_NOSIG 0x80000000 /* Freezer won't send signals to it */
 
+/* Future-safe accessor for struct task_struct's cpus_allowed. */
+static inline const struct cpumask* tsk_cpus_allowed(struct task_struct *task)
+{
+	if (task->flags & PF_THREAD_UNBOUND)
+		return cpu_possible_mask;
+
+	return task->cpus_allowed_ptr;
+}
+
 /*
  * Only the _current_ task can read/write to tsk->flags, but other
  * tasks can access tsk->flags in readonly mode for example
diff --git a/kernel/cpuset.c b/kernel/cpuset.c
index 0deb871..ccb4890 100644
--- a/kernel/cpuset.c
+++ b/kernel/cpuset.c
@@ -2189,8 +2189,10 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 
 	rcu_read_lock();
 	cs = task_cs(tsk);
-	if (cs)
-		cpumask_copy(tsk_cpus_allowed(tsk), cs->cpus_allowed);
+	if (cs) {
+		cpumask_copy(tsk->cpus_allowed_ptr, cs->cpus_allowed);
+		tsk->flags &= ~PF_THREAD_UNBOUND;
+	}
 	rcu_read_unlock();
 
 	/*
@@ -2217,7 +2219,8 @@ int cpuset_cpus_allowed_fallback(struct task_struct *tsk)
 	 * Like above we can temporary set any mask and rely on
 	 * set_cpus_allowed_ptr() as synchronization point.
 	 */
-	cpumask_copy(tsk_cpus_allowed(tsk), cpu_possible_mask);
+	cpumask_copy(tsk->cpus_allowed_ptr, cpu_possible_mask);
+	tsk->flags |= PF_THREAD_UNBOUND;
 	cpu = cpumask_any(cpu_active_mask);
 }
 
diff --git a/kernel/fork.c b/kernel/fork.c
index cc04197..485ab7d 100644
--- a/kernel/fork.c
+++ b/kernel/fork.c
@@ -169,6 +169,8 @@ void free_task(struct task_struct *tsk)
 	free_thread_info(tsk->stack);
 	rt_mutex_debug_task_free(tsk);
 	ftrace_graph_exit_task(tsk);
+	if (tsk->cpus_allowed_ptr)
+		kfree(tsk->cpus_allowed_ptr);
 	free_task_struct(tsk);
 }
 EXPORT_SYMBOL(free_task);
@@ -250,6 +252,19 @@ int __attribute__((weak)) arch_dup_task_struct(struct task_struct *dst,
 	return 0;
 }
 
+static int dup_task_cpus_allowed(struct task_struct *task, struct task_struct *orig)
+{
+	struct cpumask *cpumask;
+
+	cpumask = kmalloc(cpumask_size(), GFP_KERNEL);
+	if (!cpumask)
+		return -ENOMEM;
+	cpumask_copy(cpumask, orig->cpus_allowed_ptr);
+	task->cpus_allowed_ptr = cpumask;
+
+	return 0;
+}
+
 static struct task_struct *dup_task_struct(struct task_struct *orig)
 {
 	struct task_struct *tsk;
@@ -280,6 +295,10 @@ static struct task_struct *dup_task_struct(struct task_struct *orig)
 	if (err)
 		goto out;
 
+	err = dup_task_cpus_allowed(tsk, orig);
+	if (err)
+		goto out;
+
 	setup_thread_stack(tsk, orig);
 	clear_user_return_notifier(tsk);
 	clear_tsk_need_resched(tsk);
diff --git a/kernel/kthread.c b/kernel/kthread.c
index 5f35501..6d32c72 100644
--- a/kernel/kthread.c
+++ b/kernel/kthread.c
@@ -202,8 +202,9 @@ void kthread_bind(struct task_struct *p, unsigned int cpu)
 		return;
 	}
 
-	cpumask_copy(tsk_cpus_allowed(p), cpumask_of(cpu));
+	cpumask_copy(p->cpus_allowed_ptr, cpumask_of(cpu));
 	p->rt.nr_cpus_allowed = 1;
+	p->flags &= ~PF_THREAD_UNBOUND;
 	p->flags |= PF_THREAD_BOUND;
 }
 EXPORT_SYMBOL(kthread_bind);
diff --git a/kernel/sched.c b/kernel/sched.c
index 254d299..764576c 100644
--- a/kernel/sched.c
+++ b/kernel/sched.c
@@ -5815,7 +5815,10 @@ void __cpuinit init_idle(struct task_struct *idle, int cpu)
 	idle->state = TASK_RUNNING;
 	idle->se.exec_start = sched_clock();
 
-	cpumask_copy(tsk_cpus_allowed(idle), cpumask_of(cpu));
+	WARN_ON(!idle->cpus_allowed_ptr);
+	cpumask_copy(idle->cpus_allowed_ptr, cpumask_of(cpu));
+	idle->flags &= ~PF_THREAD_UNBOUND;
+
 	/*
 	 * We're having a chicken and egg problem, even though we are
 	 * holding rq->lock, the cpu isn't yet set to this cpu so the
@@ -5952,10 +5955,15 @@ int set_cpus_allowed_ptr(struct task_struct *p, const struct cpumask *new_mask)
 	if (p->sched_class->set_cpus_allowed)
 		p->sched_class->set_cpus_allowed(p, new_mask);
 	else {
-		cpumask_copy(tsk_cpus_allowed(p), new_mask);
+		cpumask_copy(p->cpus_allowed_ptr, new_mask);
 		p->rt.nr_cpus_allowed = cpumask_weight(new_mask);
 	}
 
+	if (cpumask_equal(new_mask, cpu_possible_mask))
+		p->flags |= PF_THREAD_UNBOUND;
+	else
+		p->flags &= ~PF_THREAD_UNBOUND;
+
 	/* Can the task run on the task's current CPU? If so, we're done */
 	if (cpumask_test_cpu(task_cpu(p), new_mask))
 		goto out;
diff --git a/kernel/sched_rt.c b/kernel/sched_rt.c
index 92cdf8c..291f33e 100644
--- a/kernel/sched_rt.c
+++ b/kernel/sched_rt.c
@@ -1583,7 +1583,7 @@ static void set_cpus_allowed_rt(struct task_struct *p,
 		update_rt_migration(&rq->rt);
 	}
 
-	cpumask_copy(tsk_cpus_allowed(p), new_mask);
+	cpumask_copy(p->cpus_allowed_ptr, new_mask);
 	p->rt.nr_cpus_allowed = weight;
 }
 
-- 
1.7.3.1