mm: Add total_unmovable_pages global variable

vmalloc bails out early when the number of pages it needs to
allocate exceeds totalram_pages. Since vmalloc cannot allocate
from ZONE_MOVABLE, pages in the movable zone should not be
counted towards that limit.
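
For reference, the size check this patch modifies in mm/vmalloc.c
reads, before the change:

    size = PAGE_ALIGN(size);
    if (!size || (size >> PAGE_SHIFT) > totalram_pages)
            return NULL;

so on a system with a large movable zone the limit is more generous
than what vmalloc can actually back with pages.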

This change adds a new global variable, total_unmovable_pages.
It is calculated in arch/arm/mm/init.c as totalram_pages minus
the pages spanned by the movable zone, and vmalloc now checks
against this new global instead of totalram_pages.
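
The init-time calculation is, in essence, a single subtraction over
the movable zone's span (see the arch/arm/mm/init.c hunk below):

    for_each_zone(zone)
            if (zone_idx(zone) == ZONE_MOVABLE)
                    total_unmovable_pages = totalram_pages -
                                            zone->spanned_pages;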

total_unmovable_pages is also updated during memory hotplug: when
the zone being onlined/offlined is unmovable, it is adjusted in
the same way as totalram_pages; when the zone is movable, no
adjustment is needed.
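
The hotplug bookkeeping mirrors the existing totalram_pages
accounting (see the mm/memory_hotplug.c hunks below), roughly:

    /* online path, per page onlined */
    if (zone_idx(page_zone(page)) != ZONE_MOVABLE)
            total_unmovable_pages++;

    /* offline path, per offlined range */
    if (zone_idx(zone) != ZONE_MOVABLE)
            total_unmovable_pages -= offlined_pages;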

Change-Id: Ie55c41051e9ad4b921eb04ecbb4798a8bd2344d6
Signed-off-by: Jack Cheung <jackc@codeaurora.org>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 5739a34..e5e3486 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -637,6 +637,9 @@
 	extern u32 dtcm_end;
 	extern u32 itcm_end;
 #endif
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+	struct zone *zone;
+#endif
 
 	max_mapnr   = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
 
@@ -682,6 +685,14 @@
 #endif
 	}
 
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+	for_each_zone(zone) {
+		if (zone_idx(zone) == ZONE_MOVABLE)
+			total_unmovable_pages = totalram_pages -
+							zone->spanned_pages;
+	}
+#endif
+
 	/*
 	 * Since our memory may not be contiguous, calculate the
 	 * real number of pages we have in this system
@@ -784,6 +795,7 @@
 
 void free_initmem(void)
 {
+	unsigned long reclaimed_initmem;
 #ifdef CONFIG_HAVE_TCM
 	extern char __tcm_start, __tcm_end;
 
@@ -792,10 +804,15 @@
 				    "TCM link");
 #endif
 
-	if (!machine_is_integrator() && !machine_is_cintegrator())
-		totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
+	if (!machine_is_integrator() && !machine_is_cintegrator()) {
+		reclaimed_initmem = free_area(__phys_to_pfn(__pa(__init_begin)),
 					    __phys_to_pfn(__pa(__init_end)),
 					    "init");
+		totalram_pages += reclaimed_initmem;
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+		total_unmovable_pages += reclaimed_initmem;
+#endif
+	}
 }
 
 #ifdef CONFIG_MEMORY_HOTPLUG
@@ -831,10 +848,16 @@
 
 void free_initrd_mem(unsigned long start, unsigned long end)
 {
-	if (!keep_initrd)
-		totalram_pages += free_area(__phys_to_pfn(__pa(start)),
+	unsigned long reclaimed_initrd_mem;
+	if (!keep_initrd) {
+		reclaimed_initrd_mem = free_area(__phys_to_pfn(__pa(start)),
 					    __phys_to_pfn(__pa(end)),
 					    "initrd");
+		totalram_pages += reclaimed_initrd_mem;
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+		total_unmovable_pages += reclaimed_initrd_mem;
+#endif
+	}
 }
 
 static int __init keepinitrd_setup(char *__unused)
diff --git a/include/linux/mm.h b/include/linux/mm.h
index 07890ac..c73fdf8 100644
--- a/include/linux/mm.h
+++ b/include/linux/mm.h
@@ -28,6 +28,9 @@
 
 extern unsigned long num_physpages;
 extern unsigned long totalram_pages;
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+extern unsigned long total_unmovable_pages;
+#endif
 extern void * high_memory;
 extern int page_cluster;
 
diff --git a/mm/memory_hotplug.c b/mm/memory_hotplug.c
index faf318e..70ea0df 100644
--- a/mm/memory_hotplug.c
+++ b/mm/memory_hotplug.c
@@ -379,6 +379,10 @@
 	unsigned long pfn = page_to_pfn(page);
 
 	totalram_pages++;
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+	if (zone_idx(page_zone(page)) != ZONE_MOVABLE)
+		total_unmovable_pages++;
+#endif
 	if (pfn >= num_physpages)
 		num_physpages = pfn + 1;
 
@@ -965,6 +969,10 @@
 	zone->zone_pgdat->node_present_pages -= offlined_pages;
 	totalram_pages -= offlined_pages;
 
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+	if (zone_idx(zone) != ZONE_MOVABLE)
+		total_unmovable_pages -= offlined_pages;
+#endif
 	init_per_zone_wmark_min();
 
 	if (!node_present_pages(node)) {
diff --git a/mm/page_alloc.c b/mm/page_alloc.c
index 5489c43..60ad5e9 100644
--- a/mm/page_alloc.c
+++ b/mm/page_alloc.c
@@ -96,6 +96,9 @@
 
 unsigned long totalram_pages __read_mostly;
 unsigned long totalreserve_pages __read_mostly;
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+unsigned long total_unmovable_pages __read_mostly;
+#endif
 int percpu_pagelist_fraction;
 gfp_t gfp_allowed_mask __read_mostly = GFP_BOOT_MASK;
 
@@ -174,6 +177,9 @@
 };
 
 EXPORT_SYMBOL(totalram_pages);
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+EXPORT_SYMBOL(total_unmovable_pages);
+#endif
 
 static char * const zone_names[MAX_NR_ZONES] = {
 #ifdef CONFIG_ZONE_DMA
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 45ece89..c33d2ae 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1611,9 +1611,14 @@
 	struct vm_struct *area;
 	void *addr;
 	unsigned long real_size = size;
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+	unsigned long total_pages = total_unmovable_pages;
+#else
+	unsigned long total_pages = totalram_pages;
+#endif
 
 	size = PAGE_ALIGN(size);
-	if (!size || (size >> PAGE_SHIFT) > totalram_pages)
+	if (!size || (size >> PAGE_SHIFT) > total_pages)
 		return NULL;
 
 	area = __get_vm_area_node(size, align, VM_ALLOC, start, end, node,