msm: Allow lowmem to be non-contiguous and mixed

Map lowmem one memblock region at a time and register each
bank in vmlist as a static mapping; iotable_init() now adds
an entry to vmlist only when its virtual range is not already
covered. Any image that is expected to live for the lifetime
of the system can then give its virtual address space back
for use by vmalloc.
Change-Id: I81ce848cd37e8573d706fa5d1aa52147b3c8da12
Signed-off-by: Neeti Desai <neetid@codeaurora.org>
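
For orientation, here is a minimal userspace sketch of the overlap
test that the new vm_area_check_early() below performs against
vmlist. The struct, list handling and addresses are simplified
stand-ins for illustration, not the kernel's types:

    #include <stdio.h>

    struct area {
            char *addr;             /* start of the virtual range */
            unsigned long size;     /* length of the range in bytes */
            struct area *next;
    };

    /* Return 1 if "vm" overlaps any entry already on "list", else 0. */
    static int area_check(const struct area *list, const struct area *vm)
    {
            const struct area *tmp;

            for (tmp = list; tmp != NULL; tmp = tmp->next) {
                    if (tmp->addr >= vm->addr) {
                            /* tmp starts at or above vm: inside vm's range? */
                            if (tmp->addr < vm->addr + vm->size)
                                    return 1;
                    } else {
                            /* tmp starts below vm: does it run into vm? */
                            if (tmp->addr + tmp->size > vm->addr)
                                    return 1;
                    }
            }
            return 0;
    }

    int main(void)
    {
            struct area mapped = { (char *)0x1000, 0x1000, NULL }; /* already mapped */
            struct area clash  = { (char *)0x1800, 0x1000, NULL }; /* overlaps mapped */
            struct area clear  = { (char *)0x3000, 0x1000, NULL }; /* disjoint */

            printf("clash overlaps: %d\n", area_check(&mapped, &clash)); /* prints 1 */
            printf("clear overlaps: %d\n", area_check(&mapped, &clear)); /* prints 0 */
            return 0;
    }

The two arms together reduce to the usual half-open interval test:
tmp and vm overlap exactly when tmp->addr < vm->addr + vm->size and
vm->addr < tmp->addr + tmp->size.
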
diff --git a/arch/arm/mm/mmu.c b/arch/arm/mm/mmu.c
index c2efc34..ebe47bf 100644
--- a/arch/arm/mm/mmu.c
+++ b/arch/arm/mm/mmu.c
@@ -871,6 +871,7 @@
 {
 	struct map_desc *md;
 	struct vm_struct *vm;
+	int rc = 0;

 	if (!nr)
 		return;
@@ -881,11 +882,13 @@
 		create_mapping(md);
 		vm->addr = (void *)(md->virtual & PAGE_MASK);
 		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
-		vm->phys_addr = __pfn_to_phys(md->pfn);
-		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+		vm->phys_addr = __pfn_to_phys(md->pfn);
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
 		vm->flags |= VM_ARM_MTYPE(md->type);
 		vm->caller = iotable_init;
-		vm_area_add_early(vm++);
+		rc = vm_area_check_early(vm);
+		if (!rc)
+			vm_area_add_early(vm++);
 	}
 }
@@ -1384,12 +1387,21 @@
 static void __init map_lowmem(void)
 {
 	struct memblock_region *reg;
+	struct vm_struct *vm;
+	phys_addr_t start;
+	phys_addr_t end;
+	unsigned long vaddr;
+	unsigned long pfn;
+	unsigned long length;
+	unsigned int type;
+	int nr = 0;

 	/* Map all the lowmem memory banks. */
 	for_each_memblock(memory, reg) {
-		phys_addr_t start = reg->base;
-		phys_addr_t end = start + reg->size;
 		struct map_desc map;

+		nr++;
+		start = reg->base;
+		end = start + reg->size;
 		if (end > arm_lowmem_limit)
 			end = arm_lowmem_limit;
@@ -1441,6 +1453,32 @@
 		create_mapping(&map);
 	}
+
+	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
+
+	for_each_memblock(memory, reg) {
+
+		start = reg->base;
+		end = start + reg->size;
+
+		if (end > arm_lowmem_limit)
+			end = arm_lowmem_limit;
+		if (start >= end)
+			break;
+
+		pfn = __phys_to_pfn(start);
+		vaddr = __phys_to_virt(start);
+		length = end - start;
+		type = MT_MEMORY;
+
+		vm->addr = (void *)(vaddr & PAGE_MASK);
+		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
+		vm->phys_addr = __pfn_to_phys(pfn);
+		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
+		vm->flags |= VM_ARM_MTYPE(type);
+		vm->caller = map_lowmem;
+		vm_area_add_early(vm++);
+	}
 }

 /*
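
A note on the map_lowmem() changes above: the first for_each_memblock()
pass only counts regions (nr++) and creates the mappings; a single
early_alloc_aligned() call then sizes one array for all of them, and the
second pass fills it through a bumping pointer (vm++), registering each
lowmem bank in vmlist. The second pass can break rather than continue
once start >= end because memblock keeps its regions sorted by base
address, so every later region lies above arm_lowmem_limit too. The same
count/allocate/fill idiom in plain userspace C, with malloc() standing
in for early_alloc_aligned() and made-up region values (illustrative
only):

    #include <stdio.h>
    #include <stdlib.h>

    struct region { unsigned long base, size; };
    struct vmarea { unsigned long addr, size; };

    int main(void)
    {
            /* Stand-ins for the sorted memblock regions. */
            struct region banks[] = { { 0x80000000UL, 0x10000000UL },
                                      { 0xa0000000UL, 0x08000000UL } };
            int nbanks = sizeof(banks) / sizeof(banks[0]);
            struct vmarea *vm, *v;
            int nr = 0, i;

            /* Pass 1: count entries (map_lowmem() does nr++ per memblock). */
            for (i = 0; i < nbanks; i++)
                    nr++;

            /* One allocation for all entries, as early_alloc_aligned() does. */
            v = vm = malloc(sizeof(*vm) * nr);
            if (!vm)
                    return 1;

            /* Pass 2: fill through a bumping pointer, mirroring vm++. */
            for (i = 0; i < nbanks; i++) {
                    v->addr = banks[i].base;    /* __phys_to_virt() in the patch */
                    v->size = banks[i].size;
                    v++;
            }

            for (i = 0; i < nr; i++)
                    printf("bank %d: addr=%#lx size=%#lx\n",
                           i, vm[i].addr, vm[i].size);
            free(vm);
            return 0;
    }
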
diff --git a/include/linux/vmalloc.h b/include/linux/vmalloc.h
index 6071e91..663bc05 100644
--- a/include/linux/vmalloc.h
+++ b/include/linux/vmalloc.h
@@ -134,6 +134,7 @@
 extern struct vm_struct *vmlist;
 extern __init void vm_area_add_early(struct vm_struct *vm);
 extern __init void vm_area_register_early(struct vm_struct *vm, size_t align);
+extern __init int vm_area_check_early(struct vm_struct *vm);

 #ifdef CONFIG_SMP
 # ifdef CONFIG_MMU
diff --git a/mm/vmalloc.c b/mm/vmalloc.c
index 5065adb..368977a 100644
--- a/mm/vmalloc.c
+++ b/mm/vmalloc.c
@@ -1116,7 +1116,31 @@
 	return mem;
 }
 EXPORT_SYMBOL(vm_map_ram);
+/**
+ * vm_area_check_early - check if a vmap area is already mapped
+ * @vm: vm_struct to be checked
+ *
+ * This function checks whether the range [@vm->addr,
+ * @vm->addr + @vm->size) overlaps any entry already on
+ * vmlist; @vm->addr and @vm->size must contain proper
+ * values. Returns 1 on overlap, 0 otherwise.
+ */
+int __init vm_area_check_early(struct vm_struct *vm)
+{
+	struct vm_struct *tmp, **p;
+	BUG_ON(vmap_initialized);
+	for (p = &vmlist; (tmp = *p) != NULL; p = &tmp->next) {
+		if (tmp->addr >= vm->addr) {
+			if (tmp->addr < vm->addr + vm->size)
+				return 1;
+		} else {
+			if (tmp->addr + tmp->size > vm->addr)
+				return 1;
+		}
+	}
+	return 0;
+}

 /**
  * vm_area_add_early - add vmap area early during boot
  * @vm: vm_struct to add