arm: mm: Exclude additional mem_map entries from free
A previous patch addressed the issue of move_freepages_block()
trampling on erroneously freed mem_map entries for the bank end
pfn. We also need to restrict the start pfn in a
complementary manner.
Also make macro usage consistent by adopting the use of
round_down and round_up.
Signed-off-by: Michael Bohan <mbohan@codeaurora.org>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 1102b37..42d340a 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -472,7 +472,10 @@
}
/*
- * The mem_map array can get very big. Free the unused area of the memory map.
+ * The mem_map array can get very big. Free as much of the unused portion of
+ * the mem_map that we are allowed to. The page migration code moves pages
+ * in blocks that are rounded per the MAX_ORDER_NR_PAGES definition, so we
+ * can't free mem_map entries that may be dereferenced in this manner.
*/
static void __init free_unused_memmap(struct meminfo *mi)
{
@@ -486,7 +489,8 @@
for_each_bank(i, mi) {
struct membank *bank = &mi->bank[i];
- bank_start = bank_pfn_start(bank);
+ bank_start = round_down(bank_pfn_start(bank),
+ MAX_ORDER_NR_PAGES);
#ifdef CONFIG_SPARSEMEM
/*
@@ -503,12 +507,8 @@
if (prev_bank_end && prev_bank_end < bank_start)
free_memmap(prev_bank_end, bank_start);
- /*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
- */
- prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
+ prev_bank_end = round_up(bank_pfn_end(bank),
+ MAX_ORDER_NR_PAGES);
}
#ifdef CONFIG_SPARSEMEM