[PATCH 1/2] x86,mm: Introduce init_memory_mapping_ext()

From: Yinghai Lu
Date: Thu Feb 24 2011 - 20:39:58 EST



Add an extra input, tbl_end, which may be smaller than end.

Prepare for init_memory_mapping_high(), which aligns the boundary to 1G:
end may be rounded up to a 1G boundary and so become larger than the
original node end.

init_memory_mapping() calls init_memory_mapping_ext() with tbl_end = end.
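
A minimal sketch of the intended call pattern, for illustration only; the
map_node_high() helper and its node_start/node_end parameters are
hypothetical and not part of this patch:

/*
 * Hypothetical caller, sketched to show why tbl_end can differ from end:
 * the mapping end is rounded up to a 1G boundary, but the page tables
 * must still be allocated from memory that really exists on the node.
 */
static void __init map_node_high(unsigned long node_start,
				 unsigned long node_end)
{
	/* round the mapping end up to the next 1G boundary */
	unsigned long end = ALIGN(node_end, 1UL << 30);

	/* keep the page-table allocation below the node's real end */
	init_memory_mapping_ext(node_start, end, node_end);
}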

Signed-off-by: Yinghai Lu <yinghai@xxxxxxxxxx>

---
arch/x86/include/asm/page_types.h | 7 +++++--
arch/x86/mm/init.c | 22 +++++++++++++++-------
2 files changed, 20 insertions(+), 9 deletions(-)

Index: linux-2.6/arch/x86/include/asm/page_types.h
===================================================================
--- linux-2.6.orig/arch/x86/include/asm/page_types.h
+++ linux-2.6/arch/x86/include/asm/page_types.h
@@ -51,8 +51,11 @@ static inline phys_addr_t get_max_mapped
return (phys_addr_t)max_pfn_mapped << PAGE_SHIFT;
}

-extern unsigned long init_memory_mapping(unsigned long start,
- unsigned long end);
+unsigned long init_memory_mapping_ext(unsigned long start,
+ unsigned long end,
+ unsigned long tbl_end);
+
+unsigned long init_memory_mapping(unsigned long start, unsigned long end);

void init_memory_mapping_high(void);

Index: linux-2.6/arch/x86/mm/init.c
===================================================================
--- linux-2.6.orig/arch/x86/mm/init.c
+++ linux-2.6/arch/x86/mm/init.c
@@ -30,10 +30,12 @@ int direct_gbpages
#endif
;

-static void __init find_early_table_space(unsigned long end, int use_pse,
+static void __init find_early_table_space(unsigned long end,
+ unsigned long tbl_end,
+ int use_pse,
int use_gbpages)
{
- unsigned long puds, pmds, ptes, tables, start = 0, good_end = end;
+ unsigned long puds, pmds, ptes, tables, start = 0;
phys_addr_t base;

puds = (end + PUD_SIZE - 1) >> PUD_SHIFT;
@@ -66,10 +68,10 @@ static void __init find_early_table_spac
/* for fixmap */
tables += roundup(__end_of_fixed_addresses * sizeof(pte_t), PAGE_SIZE);

- good_end = max_pfn_mapped << PAGE_SHIFT;
+ tbl_end = max_pfn_mapped << PAGE_SHIFT;
#endif

- base = memblock_find_in_range(start, good_end, tables, PAGE_SIZE);
+ base = memblock_find_in_range(start, tbl_end, tables, PAGE_SIZE);
if (base == MEMBLOCK_ERROR)
panic("Cannot find space for the kernel page tables");

@@ -114,8 +116,9 @@ static int __meminit save_mr(struct map_
* This runs before bootmem is initialized and gets pages directly from
* the physical memory. To access them they are temporarily mapped.
*/
-unsigned long __init_refok init_memory_mapping(unsigned long start,
- unsigned long end)
+unsigned long __init_refok init_memory_mapping_ext(unsigned long start,
+ unsigned long end,
+ unsigned long tbl_end)
{
unsigned long page_size_mask = 0;
unsigned long start_pfn, end_pfn;
@@ -258,7 +261,7 @@ unsigned long __init_refok init_memory_m
* nodes are discovered.
*/
if (!after_bootmem)
- find_early_table_space(end, use_pse, use_gbpages);
+ find_early_table_space(end, tbl_end, use_pse, use_gbpages);

for (i = 0; i < nr_range; i++)
ret = kernel_physical_mapping_init(mr[i].start, mr[i].end,
@@ -282,6 +285,11 @@ unsigned long __init_refok init_memory_m
return ret >> PAGE_SHIFT;
}

+unsigned long __init_refok init_memory_mapping(unsigned long start,
+ unsigned long end)
+{
+ return init_memory_mapping_ext(start, end, end);
+}

/*
* devmem_is_allowed() checks to see if /dev/mem access to a certain address
--