Re: [PATCH v5, part4 31/41] mm/ppc: prepare for removing num_physpages and simplify mem_init()

From: Liu Jiang
Date: Wed May 15 2013 - 11:50:13 EST


On Wed 15 May 2013 08:32:34 AM CST, Benjamin Herrenschmidt wrote:
On Wed, 2013-05-08 at 23:51 +0800, Jiang Liu wrote:
Prepare for removing num_physpages and simplify mem_init().

No objection, I haven't had a chance to actually build/boot test though.

BTW, a recommended way of doing so, which is pretty easy even if you
don't have access to powerpc hardware nowadays, is to use
qemu-system-ppc64 with -M pseries.

You can find cross compilers for the kernel on kernel.org, and you can
feed qemu a distro installer ISO.
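
Something along these lines should work; the cross-compiler prefix,
defconfig and ISO name below are just placeholders, adjust them to
whatever toolchain and image you actually have:

  # cross-build the kernel with a powerpc64 toolchain from kernel.org
  make ARCH=powerpc CROSS_COMPILE=powerpc64-linux- pseries_defconfig
  make ARCH=powerpc CROSS_COMPILE=powerpc64-linux- -j8

  # boot the result under the pseries machine model, with a distro
  # installer ISO attached as a cdrom
  qemu-system-ppc64 -M pseries -m 2G \
      -kernel vmlinux \
      -cdrom some-distro-installer.iso \
      -nographic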
Hi Benjamin,
Thanks for the review! So could I assume an "Acked-by"?
I have installed all the cross-compilers from ftp.kernel.org and done basic
build tests for these patches. But I haven't run the new kernel with qemu
yet; that's a really good suggestion, I will try it next time.
Regards!
Gerry



Cheers,
Ben.

Signed-off-by: Jiang Liu <jiang.liu@xxxxxxxxxx>
Cc: Benjamin Herrenschmidt <benh@xxxxxxxxxxxxxxxxxxx>
Cc: Paul Mackerras <paulus@xxxxxxxxx>
Cc: linuxppc-dev@xxxxxxxxxxxxxxxx
Cc: linux-kernel@xxxxxxxxxxxxxxx
---
arch/powerpc/mm/mem.c | 56 +++++++++++--------------------------------------
1 file changed, 12 insertions(+), 44 deletions(-)

diff --git a/arch/powerpc/mm/mem.c b/arch/powerpc/mm/mem.c
index b890245..4e24f1c 100644
--- a/arch/powerpc/mm/mem.c
+++ b/arch/powerpc/mm/mem.c
@@ -299,46 +299,27 @@ void __init paging_init(void)

void __init mem_init(void)
{
-#ifdef CONFIG_NEED_MULTIPLE_NODES
- int nid;
-#endif
- pg_data_t *pgdat;
- unsigned long i;
- struct page *page;
- unsigned long reservedpages = 0, codesize, initsize, datasize, bsssize;
-
#ifdef CONFIG_SWIOTLB
swiotlb_init(0);
#endif

- num_physpages = memblock_phys_mem_size() >> PAGE_SHIFT;
high_memory = (void *) __va(max_low_pfn * PAGE_SIZE);

#ifdef CONFIG_NEED_MULTIPLE_NODES
- for_each_online_node(nid) {
- if (NODE_DATA(nid)->node_spanned_pages != 0) {
- printk("freeing bootmem node %d\n", nid);
- free_all_bootmem_node(NODE_DATA(nid));
- }
+ {
+ pg_data_t *pgdat;
+
+ for_each_online_pgdat(pgdat)
+ if (pgdat->node_spanned_pages != 0) {
+ printk("freeing bootmem node %d\n",
+ pgdat->node_id);
+ free_all_bootmem_node(pgdat);
+ }
}
#else
max_mapnr = max_pfn;
free_all_bootmem();
#endif
- for_each_online_pgdat(pgdat) {
- for (i = 0; i < pgdat->node_spanned_pages; i++) {
- if (!pfn_valid(pgdat->node_start_pfn + i))
- continue;
- page = pgdat_page_nr(pgdat, i);
- if (PageReserved(page))
- reservedpages++;
- }
- }
-
- codesize = (unsigned long)&_sdata - (unsigned long)&_stext;
- datasize = (unsigned long)&_edata - (unsigned long)&_sdata;
- initsize = (unsigned long)&__init_end - (unsigned long)&__init_begin;
- bsssize = (unsigned long)&__bss_stop - (unsigned long)&__bss_start;

#ifdef CONFIG_HIGHMEM
{
@@ -348,13 +329,9 @@ void __init mem_init(void)
for (pfn = highmem_mapnr; pfn < max_mapnr; ++pfn) {
phys_addr_t paddr = (phys_addr_t)pfn << PAGE_SHIFT;
struct page *page = pfn_to_page(pfn);
- if (memblock_is_reserved(paddr))
- continue;
- free_highmem_page(page);
- reservedpages--;
+ if (!memblock_is_reserved(paddr))
+ free_highmem_page(page);
}
- printk(KERN_DEBUG "High memory: %luk\n",
- totalhigh_pages << (PAGE_SHIFT-10));
}
#endif /* CONFIG_HIGHMEM */

@@ -367,16 +344,7 @@ void __init mem_init(void)
(mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY) - 1;
#endif

- printk(KERN_INFO "Memory: %luk/%luk available (%luk kernel code, "
- "%luk reserved, %luk data, %luk bss, %luk init)\n",
- nr_free_pages() << (PAGE_SHIFT-10),
- num_physpages << (PAGE_SHIFT-10),
- codesize >> 10,
- reservedpages << (PAGE_SHIFT-10),
- datasize >> 10,
- bsssize >> 10,
- initsize >> 10);
-
+ mem_init_print_info(NULL);
#ifdef CONFIG_PPC32
pr_info("Kernel virtual memory layout:\n");
pr_info(" * 0x%08lx..0x%08lx : fixmap\n", FIXADDR_START, FIXADDR_TOP);



