[PATCH 5/6] x86/numa: set up the node to cpumask map earlier

From: Pingfan Liu
Date: Sun Feb 24 2019 - 07:35:10 EST


At present the node to cpumask map is not set up until the secondary
CPUs boot up, which is too late for building the node fallback list at
the early boot stage. Since init_cpu_to_node() already sets up the cpu
to node map, it is a good place to set up the node to cpumask map as
well. Do so by calling numa_add_cpu(cpu) in init_cpu_to_node().

Signed-off-by: Pingfan Liu <kernelfans@xxxxxxxxx>
CC: Thomas Gleixner <tglx@xxxxxxxxxxxxx>
CC: Ingo Molnar <mingo@xxxxxxxxxx>
CC: Borislav Petkov <bp@xxxxxxxxx>
CC: "H. Peter Anvin" <hpa@xxxxxxxxx>
CC: Dave Hansen <dave.hansen@xxxxxxxxxxxxxxx>
CC: Vlastimil Babka <vbabka@xxxxxxx>
CC: Mike Rapoport <rppt@xxxxxxxxxxxxxxxxxx>
CC: Andrew Morton <akpm@xxxxxxxxxxxxxxxxxxxx>
CC: Mel Gorman <mgorman@xxxxxxx>
CC: Joonsoo Kim <iamjoonsoo.kim@xxxxxxx>
CC: Andy Lutomirski <luto@xxxxxxxxxx>
CC: Andi Kleen <ak@xxxxxxxxxxxxxxx>
CC: Petr Tesarik <ptesarik@xxxxxxx>
CC: Michal Hocko <mhocko@xxxxxxxx>
CC: Stephen Rothwell <sfr@xxxxxxxxxxxxxxxx>
CC: Jonathan Corbet <corbet@xxxxxxx>
CC: Nicholas Piggin <npiggin@xxxxxxxxx>
CC: Daniel Vacek <neelx@xxxxxxxxxx>
CC: linux-kernel@xxxxxxxxxxxxxxx
---
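
Not part of the patch, only an illustration of the intended early use:
once init_cpu_to_node() has run, both the cpu to node and the node to
cpumask maps are populated, so an early boot-stage caller could build a
node fallback list along these lines. node_fallback_init() and
record_fallback_node() are hypothetical names used only for this
sketch:

	#include <linux/cpumask.h>
	#include <linux/init.h>
	#include <linux/nodemask.h>
	#include <linux/topology.h>

	/*
	 * Sketch of a hypothetical early-boot user: with the node to
	 * cpumask map already populated by init_cpu_to_node(), calling
	 * cpumask_of_node() is valid here, before any secondary CPU has
	 * booted.
	 */
	static void __init node_fallback_init(void)
	{
		int node;

		for_each_online_node(node) {
			const struct cpumask *mask = cpumask_of_node(node);

			/* e.g. push cpu-less nodes to the end of the fallback order */
			if (cpumask_empty(mask))
				continue;

			record_fallback_node(node);	/* hypothetical helper */
		}
	}
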
 arch/x86/include/asm/topology.h | 4 ----
 arch/x86/kernel/setup_percpu.c  | 3 ---
 arch/x86/mm/numa.c              | 5 ++++-
 3 files changed, 4 insertions(+), 8 deletions(-)

diff --git a/arch/x86/include/asm/topology.h b/arch/x86/include/asm/topology.h
index 453cf38..fad77c7 100644
--- a/arch/x86/include/asm/topology.h
+++ b/arch/x86/include/asm/topology.h
@@ -73,8 +73,6 @@ static inline const struct cpumask *cpumask_of_node(int node)
}
#endif

-extern void setup_node_to_cpumask_map(void);
-
#define pcibus_to_node(bus) __pcibus_to_node(bus)

extern int __node_distance(int, int);
@@ -96,8 +94,6 @@ static inline int early_cpu_to_node(int cpu)
return 0;
}

-static inline void setup_node_to_cpumask_map(void) { }
-
#endif

#include <asm-generic/topology.h>
diff --git a/arch/x86/kernel/setup_percpu.c b/arch/x86/kernel/setup_percpu.c
index e8796fc..206fa43 100644
--- a/arch/x86/kernel/setup_percpu.c
+++ b/arch/x86/kernel/setup_percpu.c
@@ -283,9 +283,6 @@ void __init setup_per_cpu_areas(void)
early_per_cpu_ptr(x86_cpu_to_node_map) = NULL;
#endif

- /* Setup node to cpumask map */
- setup_node_to_cpumask_map();
-
/* Setup cpu initialized, callin, callout masks */
setup_cpu_local_masks();

diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index c8dd7af..8d73e2273 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -110,7 +110,7 @@ void numa_clear_node(int cpu)
* Note: cpumask_of_node() is not valid until after this is done.
* (Use CONFIG_DEBUG_PER_CPU_MAPS to check this.)
*/
-void __init setup_node_to_cpumask_map(void)
+static void __init setup_node_to_cpumask_map(void)
{
unsigned int node;

@@ -738,6 +738,7 @@ void __init init_cpu_to_node(void)
BUG_ON(cpu_to_apicid == NULL);
rr = first_node(node_online_map);

+ setup_node_to_cpumask_map();
for_each_possible_cpu(cpu) {
int node = numa_cpu_node(cpu);

@@ -750,6 +751,7 @@ void __init init_cpu_to_node(void)
*/
if (node == NUMA_NO_NODE) {
numa_set_node(cpu, rr);
+ numa_add_cpu(cpu);
rr = next_node_in(rr, node_online_map);
continue;
}
@@ -758,6 +760,7 @@ void __init init_cpu_to_node(void)
init_memory_less_node(node);

numa_set_node(cpu, node);
+ numa_add_cpu(cpu);
}
}

--
2.7.4