[tip:x86/mm] x86, mm, numa: Move two functions calling on successful path later

From: tip-bot for Yinghai Lu
Date: Fri Jun 14 2013 - 17:36:06 EST


Commit-ID: f5127d18677d45bdd17bb3d34e21c2a3f6b0eef6
Gitweb: http://git.kernel.org/tip/f5127d18677d45bdd17bb3d34e21c2a3f6b0eef6
Author: Yinghai Lu <yinghai@xxxxxxxxxx>
AuthorDate: Thu, 13 Jun 2013 21:02:57 +0800
Committer: H. Peter Anvin <hpa@xxxxxxxxxxxxxxx>
CommitDate: Fri, 14 Jun 2013 14:04:53 -0700

x86, mm, numa: Move two functions calling on successful path later

We need to have numa info ready before init_mem_mapping(), so that we
can call init_mem_mapping per node, and also trim node memory ranges to
big alignment.

Currently, parsing numa info needs to allocate some buffers and needs to be
called after init_mem_mapping. So try to split parsing numa info procedure
into two steps:
- The first step will be called before init_mem_mapping, and it
should not need allocate buffers.
- The second step will contain all the buffer related code and be
executed later.

At last we will have early_initmem_init() and initmem_init().

This patch implements only the first step.

setup_node_data() and numa_init_array() are only called for successful
path, so we can move these two callings to x86_numa_init(). That will also
make numa_init() smaller and more readable.

-v2: remove online_node_map clear in numa_init(), as it is only
set in setup_node_data() at last in successful path.

Signed-off-by: Yinghai Lu <yinghai@xxxxxxxxxx>
Link: http://lkml.kernel.org/r/1371128589-8953-11-git-send-email-tangchen@xxxxxxxxxxxxxx
Reviewed-by: Tang Chen <tangchen@xxxxxxxxxxxxxx>
Tested-by: Tang Chen <tangchen@xxxxxxxxxxxxxx>
Signed-off-by: H. Peter Anvin <hpa@xxxxxxxxxxxxxxx>
---
arch/x86/mm/numa.c | 69 ++++++++++++++++++++++++++++++------------------------
1 file changed, 39 insertions(+), 30 deletions(-)

diff --git a/arch/x86/mm/numa.c b/arch/x86/mm/numa.c
index a71c4e2..07ae800 100644
--- a/arch/x86/mm/numa.c
+++ b/arch/x86/mm/numa.c
@@ -477,7 +477,7 @@ static bool __init numa_meminfo_cover_memory(const struct numa_meminfo *mi)
static int __init numa_register_memblks(struct numa_meminfo *mi)
{
unsigned long uninitialized_var(pfn_align);
- int i, nid;
+ int i;

/* Account for nodes with cpus and no memory */
node_possible_map = numa_nodes_parsed;
@@ -506,24 +506,6 @@ static int __init numa_register_memblks(struct numa_meminfo *mi)
if (!numa_meminfo_cover_memory(mi))
return -EINVAL;

- /* Finally register nodes. */
- for_each_node_mask(nid, node_possible_map) {
- u64 start = PFN_PHYS(max_pfn);
- u64 end = 0;
-
- for (i = 0; i < mi->nr_blks; i++) {
- if (nid != mi->blk[i].nid)
- continue;
- start = min(mi->blk[i].start, start);
- end = max(mi->blk[i].end, end);
- }
-
- if (start < end)
- setup_node_data(nid, start, end);
- }
-
- /* Dump memblock with node info and return. */
- memblock_dump_all();
return 0;
}

@@ -559,7 +541,6 @@ static int __init numa_init(int (*init_func)(void))

nodes_clear(numa_nodes_parsed);
nodes_clear(node_possible_map);
- nodes_clear(node_online_map);
memset(&numa_meminfo, 0, sizeof(numa_meminfo));
WARN_ON(memblock_set_node(0, ULLONG_MAX, MAX_NUMNODES));
numa_reset_distance();
@@ -577,15 +558,6 @@ static int __init numa_init(int (*init_func)(void))
if (ret < 0)
return ret;

- for (i = 0; i < nr_cpu_ids; i++) {
- int nid = early_cpu_to_node(i);
-
- if (nid == NUMA_NO_NODE)
- continue;
- if (!node_online(nid))
- numa_clear_node(i);
- }
- numa_init_array();
return 0;
}

@@ -618,7 +590,7 @@ static int __init dummy_numa_init(void)
* last fallback is dummy single node config encomapssing whole memory and
* never fails.
*/
-void __init x86_numa_init(void)
+static void __init early_x86_numa_init(void)
{
if (!numa_off) {
#ifdef CONFIG_X86_NUMAQ
@@ -638,6 +610,43 @@ void __init x86_numa_init(void)
numa_init(dummy_numa_init);
}

+void __init x86_numa_init(void)
+{
+ int i, nid;
+ struct numa_meminfo *mi = &numa_meminfo;
+
+ early_x86_numa_init();
+
+ /* Finally register nodes. */
+ for_each_node_mask(nid, node_possible_map) {
+ u64 start = PFN_PHYS(max_pfn);
+ u64 end = 0;
+
+ for (i = 0; i < mi->nr_blks; i++) {
+ if (nid != mi->blk[i].nid)
+ continue;
+ start = min(mi->blk[i].start, start);
+ end = max(mi->blk[i].end, end);
+ }
+
+ if (start < end)
+ setup_node_data(nid, start, end); /* online is set */
+ }
+
+ /* Dump memblock with node info */
+ memblock_dump_all();
+
+ for (i = 0; i < nr_cpu_ids; i++) {
+ int nid = early_cpu_to_node(i);
+
+ if (nid == NUMA_NO_NODE)
+ continue;
+ if (!node_online(nid))
+ numa_clear_node(i);
+ }
+ numa_init_array();
+}
+
static __init int find_near_online_node(int node)
{
int n, val;
--
To unsubscribe from this list: send the line "unsubscribe linux-kernel" in
the body of a message to majordomo@xxxxxxxxxxxxxxx
More majordomo info at http://vger.kernel.org/majordomo-info.html
Please read the FAQ at http://www.tux.org/lkml/