x86, numa, 32-bit: make sure we get kva space

When a 1G/3G user/kernel split is used and less memory is installed, or when
there is a big hole below 4G, max_low_pfn still ends up at 3G-128M, so the
KVA area picked just below it may not be backed by usable RAM.

Walk down from max_low_pfn until find_e820_area() succeeds; if it never
does, panic.
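
As a standalone illustration, the search boils down to the loop below.
This is only a sketch: find_area() is a made-up stand-in for
find_e820_area(), and the memory layout (a fake hole at 2G) and the
constants are invented purely for illustration.

#include <stdio.h>

#define PAGE_SHIFT	12
#define PTRS_PER_PTE	1024			/* pages per page table */
#define NO_AREA		(~0UL)

/* fake e820 lookup: only ranges ending below a pretend 2G hole are free */
static unsigned long find_area(unsigned long start, unsigned long end,
			       unsigned long size)
{
	unsigned long hole = 2UL << 30;

	if (start + size <= end && start + size <= hole)
		return start;
	return NO_AREA;
}

int main(void)
{
	unsigned long max_low_pfn = (3UL << 30) >> PAGE_SHIFT;
	unsigned long min_low_pfn = 0x100;
	unsigned long kva_pages = (128UL << 20) >> PAGE_SHIFT;
	unsigned long addr;
	long target_pfn;

	/* first candidate: just below max_low_pfn, PTRS_PER_PTE aligned */
	target_pfn = (max_low_pfn - kva_pages) & ~(PTRS_PER_PTE - 1UL);

	/* step down PTRS_PER_PTE pages at a time until the lookup succeeds */
	do {
		addr = find_area((unsigned long)target_pfn << PAGE_SHIFT,
				 max_low_pfn << PAGE_SHIFT,
				 kva_pages << PAGE_SHIFT);
		target_pfn -= PTRS_PER_PTE;
	} while (addr == NO_AREA && target_pfn > (long)min_low_pfn);

	if (addr == NO_AREA)
		printf("no kva space found - the kernel would panic here\n");
	else
		printf("kva_start_pfn = %#lx\n", addr >> PAGE_SHIFT);

	return 0;
}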

We still need to make the 32-bit code use register_e820_active_regions ... later.

Signed-off-by: Yinghai Lu <yhlu.kernel@gmail.com>
Signed-off-by: Ingo Molnar <mingo@elte.hu>
diff --git a/arch/x86/mm/discontig_32.c b/arch/x86/mm/discontig_32.c
index 914a81e..7ced26a 100644
--- a/arch/x86/mm/discontig_32.c
+++ b/arch/x86/mm/discontig_32.c
@@ -328,6 +328,7 @@
 {
 	int nid;
 	unsigned long system_start_pfn, system_max_low_pfn;
+	long kva_target_pfn;
 
 	/*
 	 * When mapping a NUMA machine we allocate the node_mem_map arrays
@@ -344,11 +345,17 @@
 	system_start_pfn = min_low_pfn = PFN_UP(init_pg_tables_end);
 
 	system_max_low_pfn = max_low_pfn = find_max_low_pfn();
-	kva_start_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
-	kva_start_pfn = find_e820_area(kva_start_pfn<<PAGE_SHIFT,
-				max_low_pfn<<PAGE_SHIFT,
-				kva_pages<<PAGE_SHIFT,
-				PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
+	kva_target_pfn = round_down(max_low_pfn - kva_pages, PTRS_PER_PTE);
+	do {
+		kva_start_pfn = find_e820_area(kva_target_pfn<<PAGE_SHIFT,
+					max_low_pfn<<PAGE_SHIFT,
+					kva_pages<<PAGE_SHIFT,
+					PTRS_PER_PTE<<PAGE_SHIFT) >> PAGE_SHIFT;
+		kva_target_pfn -= PTRS_PER_PTE;
+	} while (kva_start_pfn == -1UL && kva_target_pfn > min_low_pfn);
+
+	if (kva_start_pfn == -1UL)
+		panic("Can not get kva space\n");
 
 	printk("kva_start_pfn ~ %ld find_max_low_pfn() ~ %ld\n",
 		kva_start_pfn, max_low_pfn);