[PATCH] x86: numa32: make sure we get kva space

From: Yinghai Lu
Date: Tue Jun 03 2008 - 22:37:40 EST



When a 1G/3G user/kernel split is used and either less memory is installed
or there is a big hole below 4G, max_low_pfn still ends up at the 3G - 128M
limit even though there may be no usable RAM just below it.

Walk down from max_low_pfn until a free area is found, and panic if none
can be found.
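
To illustrate the search strategy outside the kernel, below is a minimal,
self-contained sketch. find_free_range() is a hypothetical stand-in for
find_e820_area(), and the memory map is made up (RAM ends at 2G, with a
hole from 2G to 4G) so that the first probe just below the 3G - 128M limit
fails:

#include <stdio.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PTE	1024UL	/* non-PAE 32-bit: one step = 4M of pages */

/*
 * Hypothetical stand-in for find_e820_area(): return the aligned start
 * of a free range inside [start, end) big enough for size, or -1UL.
 * Made-up map: RAM from 0 to 2G, hole from 2G to 4G.
 */
static unsigned long find_free_range(unsigned long start, unsigned long end,
				     unsigned long size, unsigned long align)
{
	unsigned long ram_end = 0x80000000UL;	/* 2G */

	start = (start + align - 1) & ~(align - 1);
	if (start + size <= ram_end && start + size <= end)
		return start;
	return -1UL;
}

int main(void)
{
	unsigned long min_low_pfn = 0x100;	/* 1M */
	unsigned long max_low_pfn = 0xb8000;	/* 3G - 128M */
	unsigned long kva_pages = 0x1000;	/* 16M of KVA, for example */
	unsigned long kva_start_pfn;
	long kva_target_pfn;

	/* Same walk-down loop as the patch: drop the target one
	 * alignment step per failed probe, stopping at min_low_pfn. */
	kva_target_pfn = (max_low_pfn - kva_pages) & ~(PTRS_PER_PTE - 1);
	do {
		unsigned long addr;

		addr = find_free_range((unsigned long)kva_target_pfn << PAGE_SHIFT,
				       max_low_pfn << PAGE_SHIFT,
				       kva_pages << PAGE_SHIFT,
				       PTRS_PER_PTE << PAGE_SHIFT);
		/* In the 32-bit kernel the -1 failure value survives the
		 * >> PAGE_SHIFT truncation; keep the check explicit here
		 * so the sketch also behaves on a 64-bit host. */
		kva_start_pfn = (addr == -1UL) ? -1UL : addr >> PAGE_SHIFT;
		kva_target_pfn -= PTRS_PER_PTE;
	} while (kva_start_pfn == -1UL && kva_target_pfn > (long)min_low_pfn);

	if (kva_start_pfn == -1UL)
		printf("no kva space below max_low_pfn -- would panic\n");
	else
		printf("kva_start_pfn = %#lx\n", kva_start_pfn);
	return 0;
}

With that map the loop steps the target down one 4M alignment step per
failed probe until it lands below the hole; had it reached min_low_pfn
first, the result would still be -1UL and the caller panics, which is
exactly what the patch does.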

The 32-bit code still needs to be converted to use
register_e820_active_regions(); that is left for later.

Signed-off-by: Yinghai Lu <yhlu.kernel@xxxxxxxxx>

diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index e119c53..e8d9a0a 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -306,6 +306,7 @@ unsigned long __init setup_memory(void)
 {
 	int nid;
 	unsigned long system_start_pfn, system_max_low_pfn;
+	long kva_target_pfn;
 
 	/*
 	 * When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -322,11 +323,17 @@ unsigned long __init setup_memory(void)
 	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
 
 	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
-	kva_start_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
-	kva_start_pfn = find_e820_area(kva_start_pfn<<PAGE_SHIFT,
-				max_low_pfn<<PAGE_SHIFT,
-				kva_pages<<PAGE_SHIFT,
-				PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
+	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
+	do {
+		kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
+					max_low_pfn<<PAGE_SHIFT,
+					kva_pages<<PAGE_SHIFT,
+					PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
+		kva_target_pfn -= PTRS_PER_PTE;
+	} while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn);
+
+	if (kva_start_pfn == -1UL)
+		panic("Can not get kva space\n");
 
 	printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
 		kva_start_pfn, max_low_pfn);
--