[PATCH] x86_64: Account mem_map in VM holes accounting

The VM needs to know about lost memory in zones to accurately
balance dirty pages. This patch accounts mem_map in there too,
which fixes a constant error of a few percent. The kernel text
and some other miscellaneous mappings are now accounted for as
well.
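
For reference, here is a minimal userspace sketch of the arithmetic
(the 64-byte sizeof(struct page) and the 4GB zone are assumed values
for illustration, not taken from this patch):

	#include <stdio.h>

	int main(void)
	{
		unsigned long page_size = 4096;   /* x86_64 base page size */
		unsigned long struct_page = 64;   /* assumed sizeof(struct page) */
		unsigned long zone_pages = 1UL << 20; /* e.g. a 4GB zone */

		/* pages consumed by mem_map for this zone; the patch adds
		   this to the zone's hole count */
		unsigned long mem_map_pages =
			zone_pages * struct_page / page_size;

		printf("mem_map costs %lu pages (%.2f%% of the zone)\n",
		       mem_map_pages, 100.0 * mem_map_pages / zone_pages);
		return 0;
	}

With these numbers mem_map eats 64/4096 = ~1.56% of every zone, which
is where the constant error of a few percent came from.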

Signed-off-by: Andi Kleen <ak@suse.de>
Signed-off-by: Linus Torvalds <torvalds@osdl.org>
diff --git a/arch/x86_64/mm/init.c b/arch/x86_64/mm/init.c
index a1ad4cc..2b1d6c3 100644
--- a/arch/x86_64/mm/init.c
+++ b/arch/x86_64/mm/init.c
@@ -47,6 +47,9 @@
 
 extern char _stext[];
 
+/* Pages reserved below the 16MB DMA zone (kernel text, bootmem etc.) */
+static unsigned long dma_reserve __initdata;
+
 DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);
 
 /*
@@ -354,6 +357,21 @@
 		w += z[i];
 		h[i] = e820_hole_size(s, w);
 	}
+
+	/* Add the space needed for mem_map to the holes too. */
+	for (i = 0; i < MAX_NR_ZONES; i++)
+		h[i] += (z[i] * sizeof(struct page)) / PAGE_SIZE;
+
+	/* The 16MB DMA zone has the kernel and other misc mappings.
+	   Account them too. */
+	if (h[ZONE_DMA]) {
+		h[ZONE_DMA] += dma_reserve;
+		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
+			printk(KERN_WARNING
+				"Kernel too large and filling up ZONE_DMA?\n");
+			h[ZONE_DMA] = z[ZONE_DMA];
+		}
+	}
 }
 
 #ifndef CONFIG_NUMA
@@ -510,6 +528,8 @@
 #else       		
 	reserve_bootmem(phys, len);    
 #endif
+	if (phys + len <= MAX_DMA_PFN * PAGE_SIZE)
+		dma_reserve += len / PAGE_SIZE;
 }
 
 int kern_addr_valid(unsigned long addr)
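
For completeness, a standalone model of the new hole accounting and the
ZONE_DMA clamp (the zone sizes, hole sizes and the 64-byte struct page
are made-up values; MAX_NR_ZONES and ZONE_DMA stand in for the kernel
constants):

	#include <stdio.h>

	#define MAX_NR_ZONES	3
	#define ZONE_DMA	0

	int main(void)
	{
		/* pages per zone (z) and e820 holes per zone (h) */
		unsigned long z[MAX_NR_ZONES] = { 4096, 221184, 786432 };
		unsigned long h[MAX_NR_ZONES] = { 256, 1024, 2048 };
		unsigned long dma_reserve = 4096; /* kernel + bootmem pages */
		int i;

		/* mem_map overhead counts as a hole in each zone */
		for (i = 0; i < MAX_NR_ZONES; i++)
			h[i] += z[i] * 64 / 4096;

		/* the kernel and misc mappings sit in the 16MB DMA zone */
		h[ZONE_DMA] += dma_reserve;
		if (h[ZONE_DMA] >= z[ZONE_DMA]) {
			printf("Kernel too large and filling up ZONE_DMA?\n");
			h[ZONE_DMA] = z[ZONE_DMA]; /* clamp to the zone size */
		}

		for (i = 0; i < MAX_NR_ZONES; i++)
			printf("zone %d: %lu pages, %lu hole pages\n",
			       i, z[i], h[i]);
		return 0;
	}

In this example dma_reserve alone already fills the whole 16MB zone, so
the warning fires and the hole count is clamped to the zone size.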