/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <mach/memory.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <linux/sched.h>
#include <linux/of_fdt.h>

/* fixme */
#include <asm/tlbflush.h>
#include <../../mm/mm.h>
#include <linux/fmem.h>

#if defined(CONFIG_ARCH_MSM7X27)
static void *strongly_ordered_page;
static char strongly_ordered_mem[PAGE_SIZE*2-4];

void __init map_page_strongly_ordered(void)
{
	unsigned long phys;
	struct map_desc map[1];

	if (strongly_ordered_page)
		return;

	strongly_ordered_page =
		(void *)PFN_ALIGN((unsigned long)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map[0].pfn = __phys_to_pfn(phys);
	map[0].virtual = MSM_STRONGLY_ORDERED_PAGE;
	map[0].length = PAGE_SIZE;
	map[0].type = MT_MEMORY_SO;
	iotable_init(map, ARRAY_SIZE(map));

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
}
#else
void map_page_strongly_ordered(void) { }
#endif

#if defined(CONFIG_ARCH_MSM7X27)
void write_to_strongly_ordered_memory(void)
{
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
}
#else
void write_to_strongly_ordered_memory(void) { }
#endif
EXPORT_SYMBOL(write_to_strongly_ordered_memory);

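/*
 * Usage sketch (illustrative, not taken from a specific caller): on
 * MSM7x27 a store to the strongly-ordered mapping forces previously
 * posted writes out to memory, so callers would typically pair the
 * helpers like this:
 *
 *	map_page_strongly_ordered();		(once, during init)
 *	...post writes to shared memory...
 *	write_to_strongly_ordered_memory();	(drain before signaling)
 */
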
/* These cache related routines make the assumption (if outer cache is
 * available) that the associated physical memory is contiguous.
 * They will operate on all (L1 and L2 if present) caches.
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_flush_range((void *)vstart, (void *)(vstart + length));
	outer_flush_range(pstart, pstart + length);
}

void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_clean_range((void *)vstart, (void *)(vstart + length));
	outer_clean_range(pstart, pstart + length);
}

void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_inv_range((void *)vstart, (void *)(vstart + length));
	outer_inv_range(pstart, pstart + length);
}

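/*
 * Illustrative call pattern (buffer names are hypothetical): clean
 * before a device reads a buffer the CPU wrote; invalidate before the
 * CPU reads a buffer a device wrote:
 *
 *	clean_caches((unsigned long)vbuf, len, pbuf);
 *	invalidate_caches((unsigned long)vbuf, len, pbuf);
 *
 * Both expect a virtually and physically contiguous range, per the
 * comment above.
 */
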
char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

struct reserve_info *reserve_info;

/**
 * calculate_reserve_limits() - calculate reserve limits for all
 * memtypes
 *
 * For each memtype in the reserve_info->memtype_reserve_table, sets
 * the `limit' field to the largest size of any memblock of that
 * memtype.
 */
static void __init calculate_reserve_limits(void)
{
	struct memblock_region *mr;
	int memtype;
	struct memtype_reserve *mt;

	for_each_memblock(memory, mr) {
		memtype = reserve_info->paddr_to_memtype(mr->base);
		if (memtype == MEMTYPE_NONE) {
			pr_warning("unknown memory type for region at %lx\n",
				(unsigned long)mr->base);
			continue;
		}
		mt = &reserve_info->memtype_reserve_table[memtype];
		mt->limit = max_t(unsigned long, mt->limit, mr->size);
	}
}

static void __init adjust_reserve_sizes(void)
{
	int i;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
			mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
		if (mt->size > mt->limit) {
			pr_warning("%lx size for %s too large, setting to %lx\n",
				mt->size, memtype_name[i], mt->limit);
			mt->size = mt->limit;
		}
	}
}

static void __init reserve_memory_for_mempools(void)
{
	int memtype, memreg_type;
	struct memtype_reserve *mt;
	struct memblock_region *mr, *mr_candidate;
	int ret;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
			continue;

		/* Choose the memory block with the highest physical
		 * address which is large enough, so that we will not
		 * take memory from the lowest memory bank which the kernel
		 * is in (and cause boot problems) and so that we might
		 * be able to steal memory that would otherwise become
		 * highmem.
		 */
		mr_candidate = NULL;	/* reset the candidate per memtype */
		for_each_memblock(memory, mr) {
			memreg_type =
				reserve_info->paddr_to_memtype(mr->base);
			if (memtype != memreg_type)
				continue;
			if (mr->size >= mt->size
				&& (mr_candidate == NULL
					|| mr->base > mr_candidate->base))
				mr_candidate = mr;
		}
		BUG_ON(mr_candidate == NULL);
		/* bump mt up against the top of the region */
		mt->start = mr_candidate->base + mr_candidate->size - mt->size;
		ret = memblock_reserve(mt->start, mt->size);
		BUG_ON(ret);
		ret = memblock_free(mt->start, mt->size);
		BUG_ON(ret);
		ret = memblock_remove(mt->start, mt->size);
		BUG_ON(ret);
	}
}

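/*
 * Note on the memblock_reserve()/memblock_free()/memblock_remove()
 * sequence above: the apparent intent (an assumption, not stated in
 * this file) is to exercise memblock's bookkeeping on the exact range
 * before it is removed from the kernel's view entirely; each step is
 * BUG_ON() checked because a failure would leave the mempool layout
 * inconsistent.
 */
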
static void __init initialize_mempools(void)
{
	struct mem_pool *mpool;
	int memtype;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (!mt->size)
			continue;
		mpool = initialize_memory_pool(mt->start, mt->size, memtype);
		if (!mpool)
			pr_warning("failed to create %s mempool\n",
				memtype_name[memtype]);
	}
}

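/*
 * The regions carved out above back the allocate_contiguous_*()
 * helpers below: initialize_memory_pool() registers one mem_pool per
 * populated memtype, and later allocations are routed to it by
 * memtype index.
 */
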
#define MAX_FIXED_AREA_SIZE 0x11000000

void __init msm_reserve(void)
{
	unsigned long msm_fixed_area_size;
	unsigned long msm_fixed_area_start;

	memory_pool_init();
	if (reserve_info->calculate_reserve_sizes)
		reserve_info->calculate_reserve_sizes();

	msm_fixed_area_size = reserve_info->fixed_area_size;
	msm_fixed_area_start = reserve_info->fixed_area_start;
	if (msm_fixed_area_size &&
	    msm_fixed_area_start > reserve_info->low_unstable_address
				   - MAX_FIXED_AREA_SIZE)
		reserve_info->low_unstable_address = msm_fixed_area_start;

	calculate_reserve_limits();
	adjust_reserve_sizes();
	reserve_memory_for_mempools();
	initialize_mempools();
}

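/*
 * Sketch of how board code would hook this in (the machine name is
 * hypothetical): msm_reserve() is typically called from the machine
 * descriptor's .reserve callback, after memblock is populated but
 * before paging is finalized:
 *
 *	MACHINE_START(SOME_MSM_BOARD, "Some MSM board")
 *		.reserve = msm_reserve,
 *		...
 *	MACHINE_END
 */
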
static int get_ebi_memtype(void)
{
	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		return MEMTYPE_EBI0;
	return MEMTYPE_EBI1;
}

void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	return allocate_contiguous_memory(size, get_ebi_memtype(),
		align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
		align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);

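/*
 * Illustrative use (sizes and names are made up): a driver needing a
 * physically contiguous, uncached EBI buffer might do
 *
 *	void *buf = allocate_contiguous_ebi(SZ_1M, SZ_4K, 0);
 *
 * or, when only a physical address is wanted (no kernel mapping),
 *
 *	unsigned long paddr = allocate_contiguous_ebi_nomap(SZ_1M, SZ_4K);
 */
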
unsigned int msm_ttbr0;

void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}

int request_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_C_STATE);
}

int release_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_T_STATE);
}

static char * const memtype_names[] = {
	[MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
	[MEMTYPE_SMI] = "SMI",
	[MEMTYPE_EBI0] = "EBI0",
	[MEMTYPE_EBI1] = "EBI1",
};

int msm_get_memory_type_from_name(const char *memtype_name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
		if (memtype_names[i] &&
		    strcmp(memtype_name, memtype_names[i]) == 0)
			return i;
	}

	pr_err("Could not find memory type %s\n", memtype_name);
	return -EINVAL;
}

static int reserve_memory_type(const char *mem_name,
	struct memtype_reserve *reserve_table,
	int size)
{
	int ret = msm_get_memory_type_from_name(mem_name);

	if (ret >= 0) {
		reserve_table[ret].size += size;
		ret = 0;
	}
	return ret;
}

static int __init check_for_compat(unsigned long node)
{
	char **start = __compat_exports_start;

	for ( ; start < __compat_exports_end; start++)
		if (of_flat_dt_is_compatible(node, *start))
			return 1;

	return 0;
}

int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
	int depth, void *data)
{
	char *memory_name_prop;
	unsigned int *memory_remove_prop;
	unsigned long memory_name_prop_length;
	unsigned long memory_remove_prop_length;
	unsigned long memory_size_prop_length;
	unsigned int *memory_size_prop;
	unsigned int memory_size;
	unsigned int memory_start;
	int ret;

	memory_name_prop = of_get_flat_dt_prop(node,
		"qcom,memory-reservation-type",
		&memory_name_prop_length);
	memory_remove_prop = of_get_flat_dt_prop(node,
		"qcom,memblock-remove",
		&memory_remove_prop_length);

	if (memory_name_prop || memory_remove_prop) {
		if (!check_for_compat(node))
			goto out;
	} else {
		goto out;
	}

	if (memory_name_prop) {
		if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
			WARN(1, "Memory name was malformed\n");
			goto mem_remove;
		}

		memory_size_prop = of_get_flat_dt_prop(node,
			"qcom,memory-reservation-size",
			&memory_size_prop_length);

		if (memory_size_prop &&
		    (memory_size_prop_length == sizeof(unsigned int))) {
			memory_size = be32_to_cpu(*memory_size_prop);

			if (reserve_memory_type(memory_name_prop,
					data, memory_size) == 0)
				pr_info("%s reserved %s size %x\n",
					uname, memory_name_prop, memory_size);
			else
				WARN(1, "Node %s reserve failed\n",
					uname);
		} else {
			WARN(1, "Node %s specified bad/nonexistent size\n",
				uname);
		}
	}

mem_remove:

	if (memory_remove_prop) {
		if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
			WARN(1, "Memory remove malformed\n");
			goto out;
		}

		memory_start = be32_to_cpu(memory_remove_prop[0]);
		memory_size = be32_to_cpu(memory_remove_prop[1]);

		ret = memblock_remove(memory_start, memory_size);
		if (ret)
			WARN(1, "Failed to remove memory %x-%x\n",
				memory_start, memory_start+memory_size);
		else
			pr_info("Node %s removed memory %x-%x\n", uname,
				memory_start, memory_start+memory_size);
	}

out:
	return 0;
}

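/*
 * Illustrative flat device tree node that the scan above would act on
 * (the node name is hypothetical; the compatible string must be one
 * exported via EXPORT_COMPAT(), as at the end of this file):
 *
 *	example-mem {
 *		compatible = "qcom,msm-contig-mem";
 *		qcom,memory-reservation-type = "EBI1";
 *		qcom,memory-reservation-size = <0x100000>;
 *	};
 *
 * Board code typically runs the scan with
 *
 *	of_scan_flat_dt(dt_scan_for_memory_reserve, reserve_table);
 *
 * where reserve_table is the memtype_reserve array received here as
 * the "data" argument.
 */
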
/* This function scans the device tree to populate the memory hole table */
int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
	int depth, void *data)
{
	unsigned int *memory_remove_prop;
	unsigned long memory_remove_prop_length;
	unsigned long hole_start;
	unsigned long hole_size;

	memory_remove_prop = of_get_flat_dt_prop(node,
		"qcom,memblock-remove",
		&memory_remove_prop_length);

	if (!memory_remove_prop || !check_for_compat(node))
		goto out;

	if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
		WARN(1, "Memory remove malformed\n");
		goto out;
	}

	hole_start = be32_to_cpu(memory_remove_prop[0]);
	hole_size = be32_to_cpu(memory_remove_prop[1]);

	if (hole_start + hole_size <= MAX_HOLE_ADDRESS) {
		/* Track only the largest hole seen so far. */
		if ((memory_hole_start == 0 && memory_hole_end == 0) ||
		    (memory_hole_end - memory_hole_start) <= hole_size) {
			memory_hole_start = hole_start;
			memory_hole_end = hole_start + hole_size;
		}
	}
	adjust_meminfo(hole_start, hole_size);

out:
	return 0;
}

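/*
 * Illustrative node (addresses are made up): removing 16MB at
 * 0x88000000 from the kernel's view, e.g. memory owned by a peripheral
 * subsystem:
 *
 *	example-hole {
 *		compatible = "qcom,msm-contig-mem";
 *		qcom,memblock-remove = <0x88000000 0x1000000>;
 *	};
 */
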
/*
 * Split the memory bank to reflect the hole, if present,
 * using the start and end of the memory hole.
 */
void adjust_meminfo(unsigned long start, unsigned long size)
{
	int i;

	for (i = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[i];

		if (((start + size) <= (bank->start + bank->size)) &&
		    (start >= bank->start)) {
			memmove(bank + 1, bank,
				(meminfo.nr_banks - i) * sizeof(*bank));
			meminfo.nr_banks++;
			i++;

			bank->size = start - bank->start;
			bank[1].start = (start + size);
			bank[1].size -= (bank->size + size);
			bank[1].highmem = 0;
		}
	}
}

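/*
 * Worked example (made-up numbers): a 256MB bank at 0x80000000 with a
 * 16MB hole at 0x88000000 splits into
 *
 *	bank[i]:   start 0x80000000, size 0x08000000 (128MB)
 *	bank[i+1]: start 0x89000000, size 0x07000000 (112MB)
 *
 * Note the memmove() assumes meminfo.bank[] still has room for one
 * more entry.
 */
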
unsigned long get_ddr_size(void)
{
	unsigned int i;
	unsigned long ret = 0;

	for (i = 0; i < meminfo.nr_banks; i++)
		ret += meminfo.bank[i].size;

	return ret;
}

/* Provide a string that anonymous device tree allocations (those not
 * directly associated with any driver) can use for their "compatible"
 * field */
EXPORT_COMPAT("qcom,msm-contig-mem");
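
/*
 * EXPORT_COMPAT() places the string in the __compat_exports section
 * that check_for_compat() walks above, so device tree nodes carrying
 * compatible = "qcom,msm-contig-mem" are accepted by the memory
 * reservation/removal scans in this file.
 */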