Merge commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126' into msm-3.4
AU_LINUX_ANDROID_ICS.04.00.04.00.126 is from msm-3.0.
The first parent is from google/android-3.4.
* commit 'AU_LINUX_ANDROID_ICS.04.00.04.00.126': (8712 commits)
PRNG: Device tree entry for qrng device.
vidc:1080p: Set video core timeout value for Thumbnail mode
msm: sps: improve the debugging support in SPS driver
board-8064 msm: Overlap secure and non secure video firmware heaps.
msm: clock: Add handoff ops for 7x30 and copper XO clocks
msm_fb: display: Wait for external vsync before DTV IOMMU unmap
msm: Fix circular dependency in debug UART settings
msm: gdsc: Add GDSC regulator driver for msm-copper
defconfig: Enable Mobicore Driver.
mobicore: Add mobicore driver.
mobicore: rename variable to lower case.
mobicore: rename folder.
mobicore: add makefiles
mobicore: initial import of kernel driver
ASoC: msm: Add SLIMBUS_2_RX CPU DAI
board-8064-gpio: Update FUNC for EPM SPI CS
msm_fb: display: Remove chicken bit config during video playback
mmc: msm_sdcc: enable the sanitize capability
msm-fb: display: lm2 writeback support on mpq platforms
msm_fb: display: Disable LVDS phy & pll during panel off
...
Signed-off-by: Steve Muckle <smuckle@codeaurora.org>
diff --git a/arch/arm/mm/init.c b/arch/arm/mm/init.c
index 8f5813b..59e252b 100644
--- a/arch/arm/mm/init.c
+++ b/arch/arm/mm/init.c
@@ -13,6 +13,7 @@
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
+#include <linux/mm.h>
#include <linux/export.h>
#include <linux/nodemask.h>
#include <linux/initrd.h>
@@ -20,6 +21,7 @@
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/memblock.h>
+#include <linux/sort.h>
#include <asm/mach-types.h>
#include <asm/memblock.h>
@@ -122,7 +124,14 @@
else
shared += page_count(page) - 1;
page++;
+#ifdef CONFIG_SPARSEMEM
+ pfn1++;
+ if (!(pfn1 % PAGES_PER_SECTION))
+ page = pfn_to_page(pfn1);
+ } while (pfn1 < pfn2);
+#else
} while (page < end);
+#endif
}
printk("%d pages of RAM\n", total);
@@ -226,6 +235,29 @@
}
#endif
+#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+static void __init arm_bootmem_free_apnm(unsigned long max_low,
+ unsigned long max_high)
+{
+ unsigned long max_zone_pfns[MAX_NR_ZONES];
+ struct memblock_region *reg;
+
+ memset(max_zone_pfns, 0, sizeof(max_zone_pfns));
+
+ max_zone_pfns[0] = max_low;
+#ifdef CONFIG_HIGHMEM
+ max_zone_pfns[ZONE_HIGHMEM] = max_high;
+#endif
+ for_each_memblock(memory, reg) {
+ unsigned long start = memblock_region_memory_base_pfn(reg);
+ unsigned long end = memblock_region_memory_end_pfn(reg);
+
+ add_active_range(0, start, end);
+ }
+ free_area_init_nodes(max_zone_pfns);
+}
+
+#else
static void __init arm_bootmem_free(unsigned long min, unsigned long max_low,
unsigned long max_high)
{
@@ -283,6 +315,7 @@
free_area_init_node(0, zone_size, min, zhole_size);
}
+#endif
#ifdef CONFIG_HAVE_ARCH_PFN_VALID
int pfn_valid(unsigned long pfn)
@@ -299,11 +332,12 @@
#else
static void __init arm_memory_present(void)
{
- struct memblock_region *reg;
-
- for_each_memblock(memory, reg)
- memory_present(0, memblock_region_memory_base_pfn(reg),
- memblock_region_memory_end_pfn(reg));
+ struct meminfo *mi = &meminfo;
+ int i;
+ for_each_bank(i, mi) {
+ memory_present(0, bank_pfn_start(&mi->bank[i]),
+ bank_pfn_end(&mi->bank[i]));
+ }
}
#endif
@@ -322,10 +356,37 @@
return phys;
}
+static int __init meminfo_cmp(const void *_a, const void *_b)
+{
+ const struct membank *a = _a, *b = _b;
+ long cmp = bank_pfn_start(a) - bank_pfn_start(b);
+ return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
+}
+
+#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
+unsigned long membank0_size;
+EXPORT_SYMBOL(membank0_size);
+unsigned long membank1_start;
+EXPORT_SYMBOL(membank1_start);
+
+void __init find_membank0_hole(void)
+{
+ sort(&meminfo.bank, meminfo.nr_banks,
+ sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+
+ membank0_size = meminfo.bank[0].size;
+ membank1_start = meminfo.bank[1].start;
+}
+#endif
+
void __init arm_memblock_init(struct meminfo *mi, struct machine_desc *mdesc)
{
int i;
+#ifndef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
+ sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);
+#endif
+
for (i = 0; i < mi->nr_banks; i++)
memblock_add(mi->bank[i].start, mi->bank[i].size);
@@ -369,6 +430,28 @@
memblock_dump_all();
}
+#ifdef CONFIG_MEMORY_HOTPLUG_SPARSE
+int _early_pfn_valid(unsigned long pfn)
+{
+ struct meminfo *mi = &meminfo;
+ unsigned int left = 0, right = mi->nr_banks;
+
+ do {
+ unsigned int mid = (right + left) / 2;
+ struct membank *bank = &mi->bank[mid];
+
+ if (pfn < bank_pfn_start(bank))
+ right = mid;
+ else if (pfn >= bank_pfn_end(bank))
+ left = mid + 1;
+ else
+ return 1;
+ } while (left < right);
+ return 0;
+}
+EXPORT_SYMBOL(_early_pfn_valid);
+#endif
+
void __init bootmem_init(void)
{
unsigned long min, max_low, max_high;
@@ -390,12 +473,16 @@
*/
sparse_init();
+#ifdef CONFIG_ARCH_POPULATES_NODE_MAP
+ arm_bootmem_free_apnm(max_low, max_high);
+#else
/*
* Now free the memory - free_area_init_node needs
* the sparse mem_map arrays initialized by sparse_init()
* for memmap_init_zone(), otherwise all PFNs are invalid.
*/
arm_bootmem_free(min, max_low, max_high);
+#endif
/*
* This doesn't seem to be used by the Linux memory manager any
@@ -466,7 +553,10 @@
}
/*
- * The mem_map array can get very big. Free the unused area of the memory map.
+ * The mem_map array can get very big. Free as much of the unused portion of
+ * the mem_map that we are allowed to. The page migration code moves pages
+ * in blocks that are rounded per the MAX_ORDER_NR_PAGES definition, so we
+ * can't free mem_map entries that may be dereferenced in this manner.
*/
static void __init free_unused_memmap(struct meminfo *mi)
{
@@ -480,7 +570,8 @@
for_each_bank(i, mi) {
struct membank *bank = &mi->bank[i];
- bank_start = bank_pfn_start(bank);
+ bank_start = round_down(bank_pfn_start(bank),
+ MAX_ORDER_NR_PAGES);
#ifdef CONFIG_SPARSEMEM
/*
@@ -504,12 +595,8 @@
if (prev_bank_end && prev_bank_end < bank_start)
free_memmap(prev_bank_end, bank_start);
- /*
- * Align up here since the VM subsystem insists that the
- * memmap entries are valid from the bank end aligned to
- * MAX_ORDER_NR_PAGES.
- */
- prev_bank_end = ALIGN(bank_pfn_end(bank), MAX_ORDER_NR_PAGES);
+ prev_bank_end = round_up(bank_pfn_end(bank),
+ MAX_ORDER_NR_PAGES);
}
#ifdef CONFIG_SPARSEMEM
@@ -584,6 +671,9 @@
extern u32 dtcm_end;
extern u32 itcm_end;
#endif
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+ struct zone *zone;
+#endif
max_mapnr = pfn_to_page(max_pfn + PHYS_PFN_OFFSET) - mem_map;
@@ -619,9 +709,24 @@
else if (!page_count(page))
free_pages++;
page++;
+#ifdef CONFIG_SPARSEMEM
+ pfn1++;
+ if (!(pfn1 % PAGES_PER_SECTION))
+ page = pfn_to_page(pfn1);
+ } while (pfn1 < pfn2);
+#else
} while (page < end);
+#endif
}
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+ for_each_zone(zone) {
+ if (zone_idx(zone) == ZONE_MOVABLE)
+ total_unmovable_pages = totalram_pages -
+ zone->spanned_pages;
+ }
+#endif
+
/*
* Since our memory may not be contiguous, calculate the
* real number of pages we have in this system
@@ -719,6 +824,7 @@
void free_initmem(void)
{
+ unsigned long reclaimed_initmem;
#ifdef CONFIG_HAVE_TCM
extern char __tcm_start, __tcm_end;
@@ -729,23 +835,61 @@
#endif
poison_init_mem(__init_begin, __init_end - __init_begin);
- if (!machine_is_integrator() && !machine_is_cintegrator())
- totalram_pages += free_area(__phys_to_pfn(__pa(__init_begin)),
+ if (!machine_is_integrator() && !machine_is_cintegrator()) {
+ reclaimed_initmem = free_area(__phys_to_pfn(__pa(__init_begin)),
__phys_to_pfn(__pa(__init_end)),
"init");
+ totalram_pages += reclaimed_initmem;
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+ total_unmovable_pages += reclaimed_initmem;
+#endif
+ }
}
+#ifdef CONFIG_MEMORY_HOTPLUG
+int arch_add_memory(int nid, u64 start, u64 size)
+{
+ struct pglist_data *pgdata = NODE_DATA(nid);
+ struct zone *zone = pgdata->node_zones + ZONE_MOVABLE;
+ unsigned long start_pfn = start >> PAGE_SHIFT;
+ unsigned long nr_pages = size >> PAGE_SHIFT;
+
+ return __add_pages(nid, zone, start_pfn, nr_pages);
+}
+
+int arch_physical_active_memory(u64 start, u64 size)
+{
+ return platform_physical_active_pages(start, size);
+}
+
+int arch_physical_remove_memory(u64 start, u64 size)
+{
+ return platform_physical_remove_pages(start, size);
+}
+
+int arch_physical_low_power_memory(u64 start, u64 size)
+{
+ return platform_physical_low_power_pages(start, size);
+}
+#endif
+
#ifdef CONFIG_BLK_DEV_INITRD
static int keep_initrd;
void free_initrd_mem(unsigned long start, unsigned long end)
{
+ unsigned long reclaimed_initrd_mem;
+
if (!keep_initrd) {
poison_init_mem((void *)start, PAGE_ALIGN(end) - start);
- totalram_pages += free_area(__phys_to_pfn(__pa(start)),
- __phys_to_pfn(__pa(end)),
- "initrd");
+ reclaimed_initrd_mem = free_area(__phys_to_pfn(__pa(start)),
+ __phys_to_pfn(__pa(end)),
+ "initrd");
+ totalram_pages += reclaimed_initrd_mem;
+#ifdef CONFIG_FIX_MOVABLE_ZONE
+ total_unmovable_pages += reclaimed_initrd_mem;
+#endif
}
}
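
Note on the free_unused_memmap() hunks above: bank boundaries are now rounded to MAX_ORDER_NR_PAGES before deciding which part of mem_map can be released, because page-block operations may dereference struct pages anywhere inside a MAX_ORDER-aligned block. A standalone sketch of that arithmetic follows; it is not part of the patch, and the bank pfns and the MAX_ORDER_NR_PAGES value in it are hypothetical.

/*
 * Illustration only (not kernel code): bank starts are rounded down and
 * bank ends rounded up to MAX_ORDER_NR_PAGES, and only the memmap covering
 * the gap between two rounded banks is freed. Bank pfns below are made up.
 */
#include <stdio.h>

#define MAX_ORDER_NR_PAGES 1024UL	/* 1 << (MAX_ORDER - 1) with MAX_ORDER == 11 */

static unsigned long rdown(unsigned long x, unsigned long a) { return x & ~(a - 1); }
static unsigned long rup(unsigned long x, unsigned long a) { return (x + a - 1) & ~(a - 1); }

int main(void)
{
	/* Two hypothetical banks (pfn ranges) with a hole between them. */
	unsigned long bank0_start = 0x00200, bank0_end = 0x00c50;
	unsigned long bank1_start = 0x40000;

	unsigned long prev_bank_end = rup(bank0_end, MAX_ORDER_NR_PAGES);
	unsigned long bank_start = rdown(bank1_start, MAX_ORDER_NR_PAGES);

	printf("bank0 memmap kept for pfns [%#lx, %#lx)\n",
	       rdown(bank0_start, MAX_ORDER_NR_PAGES), prev_bank_end);

	if (prev_bank_end < bank_start)	/* hole between the rounded banks */
		printf("memmap freed for pfns  [%#lx, %#lx)\n",
		       prev_bank_end, bank_start);
	return 0;
}

Built on its own, this prints a kept range of [0, 0x1000) and a freed range of [0x1000, 0x40000), which is the same round_down/round_up behaviour the hunk introduces in place of the old ALIGN() on the bank end only.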