On 10/7/22 06:01, Peter Zijlstra wrote:
> On Thu, Sep 22, 2022 at 02:00:39PM -0400, Waiman Long wrote:
>> @@ -9647,6 +9656,9 @@ void __init sched_init(void)
>>  			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
>>  		per_cpu(select_rq_mask, i) = (cpumask_var_t)kzalloc_node(
>>  			cpumask_size(), GFP_KERNEL, cpu_to_node(i));
>> +		per_cpu(runqueues.scratch_mask, i) =
>> +			(cpumask_var_t)kzalloc_node(cpumask_size(),
>> +				GFP_KERNEL, cpu_to_node(i));
>>  	}
>>  #endif /* CONFIG_CPUMASK_OFFSTACK */
>
> That doesn't actually apply; I've made it:
> --- a/kernel/sched/core.c
> +++ b/kernel/sched/core.c
> @@ -9748,6 +9748,7 @@ void __init sched_init(void)
>  		rq->core_cookie = 0UL;
>  #endif
> +		zalloc_cpumask_var_node(&per_cpu(runqueues.scratch_mask, i), GFP_KERNEL, cpu_to_node(i));
>  	}
>
>  	set_load_weight(&init_task, false);

Sorry, I should have worked on the latest tip tree instead.
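
For what it's worth, the reason the zalloc_cpumask_var_node() form no longer
has to sit inside the #ifdef CONFIG_CPUMASK_OFFSTACK block is that the helper
covers both configurations: with CONFIG_CPUMASK_OFFSTACK=y it allocates a
zeroed mask on the requested node, and otherwise cpumask_var_t is an embedded
array and the call just zeroes it.  A minimal sketch of that pattern follows;
the rq_like struct, the rq_like_instances per-cpu variable and the
alloc_scratch_masks() name are made up for illustration, they are not the real
runqueue code:

#include <linux/cpumask.h>
#include <linux/percpu.h>
#include <linux/gfp.h>
#include <linux/topology.h>
#include <linux/init.h>

/* Illustrative stand-in for a per-CPU structure carrying a scratch mask. */
struct rq_like {
	cpumask_var_t scratch_mask;
};

static DEFINE_PER_CPU(struct rq_like, rq_like_instances);

/*
 * Allocate (or, with !CONFIG_CPUMASK_OFFSTACK, just zero) one scratch
 * mask per possible CPU, placed on that CPU's memory node.  No #ifdef
 * is needed because zalloc_cpumask_var_node() hides the difference.
 */
static int __init alloc_scratch_masks(void)
{
	int i;

	for_each_possible_cpu(i) {
		if (!zalloc_cpumask_var_node(&per_cpu(rq_like_instances.scratch_mask, i),
					     GFP_KERNEL, cpu_to_node(i)))
			return -ENOMEM;
	}
	return 0;
}

(The hunk above simply ignores the return value, as is common for allocations
made that early in boot; the sketch checks it only for completeness.)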