Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 1 | /* arch/arm/mach-msm/memory.c |
| 2 | * |
| 3 | * Copyright (C) 2007 Google, Inc. |
Larry Bassel | 38e22da | 2013-02-25 10:54:16 -0800 | [diff] [blame] | 4 | * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved. |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 5 | * |
| 6 | * This software is licensed under the terms of the GNU General Public |
| 7 | * License version 2, as published by the Free Software Foundation, and |
| 8 | * may be copied, distributed, and modified under those terms. |
| 9 | * |
| 10 | * This program is distributed in the hope that it will be useful, |
| 11 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 12 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 13 | * GNU General Public License for more details. |
| 14 | * |
| 15 | */ |
| 16 | |
| 17 | #include <linux/mm.h> |
| 18 | #include <linux/mm_types.h> |
| 19 | #include <linux/bootmem.h> |
| 20 | #include <linux/module.h> |
| 21 | #include <linux/memory_alloc.h> |
| 22 | #include <linux/memblock.h> |
Larry Bassel | 71237ba | 2013-04-02 10:55:31 -0700 | [diff] [blame] | 23 | #include <asm/memblock.h> |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 24 | #include <asm/pgtable.h> |
| 25 | #include <asm/io.h> |
| 26 | #include <asm/mach/map.h> |
| 27 | #include <asm/cacheflush.h> |
| 28 | #include <asm/setup.h> |
| 29 | #include <asm/mach-types.h> |
| 30 | #include <mach/msm_memtypes.h> |
Neeti Desai | 1b2cb55 | 2012-11-01 21:57:36 -0700 | [diff] [blame] | 31 | #include <mach/memory.h> |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 32 | #include <linux/hardirq.h> |
| 33 | #if defined(CONFIG_MSM_NPA_REMOTE) |
| 34 | #include "npa_remote.h" |
| 35 | #include <linux/completion.h> |
| 36 | #include <linux/err.h> |
| 37 | #endif |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 38 | #include <mach/msm_iomap.h> |
| 39 | #include <mach/socinfo.h> |
Steve Muckle | f132c6c | 2012-06-06 18:30:57 -0700 | [diff] [blame] | 40 | #include <linux/sched.h> |
Laura Abbott | d8d0f77 | 2012-07-10 10:27:06 -0700 | [diff] [blame] | 41 | #include <linux/of_fdt.h> |
Steve Muckle | f132c6c | 2012-06-06 18:30:57 -0700 | [diff] [blame] | 42 | |
| 43 | /* fixme */ |
| 44 | #include <asm/tlbflush.h> |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 45 | #include <../../mm/mm.h> |
| 46 | |
#if defined(CONFIG_ARCH_MSM7X27)
/* Page-aligned window carved from strongly_ordered_mem; NULL until
 * map_page_strongly_ordered() runs. */
static void *strongly_ordered_page;
/* Oversized backing buffer so a page-aligned PAGE_SIZE window is
 * guaranteed to fit inside it regardless of the buffer's alignment. */
static char strongly_ordered_mem[PAGE_SIZE*2-4];

/*
 * map_page_strongly_ordered() - map one RAM page at the fixed virtual
 * address MSM_STRONGLY_ORDERED_PAGE with the MT_MEMORY_SO (strongly
 * ordered) mapping type.
 *
 * Idempotent: returns immediately if the page has already been mapped.
 * The mapping is written to by write_to_strongly_ordered_memory()
 * below; presumably the strongly-ordered attribute makes that write
 * act as a bus-drain barrier on 7x27 — confirm against the TRM.
 */
void __init map_page_strongly_ordered(void)
{
	long unsigned int phys;
	struct map_desc map[1];

	if (strongly_ordered_page)
		return;

	/* Round up to the first page boundary inside the static buffer. */
	strongly_ordered_page = (void*)PFN_ALIGN((int)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map[0].pfn = __phys_to_pfn(phys);
	map[0].virtual = MSM_STRONGLY_ORDERED_PAGE;
	map[0].length = PAGE_SIZE;
	map[0].type = MT_MEMORY_SO;
	iotable_init(map, ARRAY_SIZE(map));

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
}
#else
/* Non-7x27 targets need no strongly-ordered page. */
void map_page_strongly_ordered(void) { }
#endif
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 73 | |
#if defined(CONFIG_ARCH_MSM7X27)
/*
 * Perform a dummy write to the page mapped strongly-ordered by
 * map_page_strongly_ordered().  The value written is irrelevant;
 * only the access itself matters.
 */
void write_to_strongly_ordered_memory(void)
{
	int *so_page = (int *)MSM_STRONGLY_ORDERED_PAGE;

	*so_page = 0;
}
#else
/* No strongly-ordered page exists on other targets; nothing to do. */
void write_to_strongly_ordered_memory(void) { }
#endif
EXPORT_SYMBOL(write_to_strongly_ordered_memory);
| 83 | |
/* These cache related routines make the assumption (if outer cache is
 * available) that the associated physical memory is contiguous.
 * They will operate on all (L1 and L2 if present) caches.
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *vbegin = (void *)vstart;
	void *vend = (void *)(vstart + length);

	/* Flush (clean + invalidate) L1 by virtual range, then the
	 * outer cache by physical range. */
	dmac_flush_range(vbegin, vend);
	outer_flush_range(pstart, pstart + length);
}
| 94 | |
/* Clean (write back, keep valid) the given range in all cache levels;
 * same contiguity assumption as clean_and_invalidate_caches(). */
void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *vbegin = (void *)vstart;
	void *vend = (void *)(vstart + length);

	dmac_clean_range(vbegin, vend);
	outer_clean_range(pstart, pstart + length);
}
| 101 | |
/* Invalidate (discard without writing back) the given range in all
 * cache levels; same contiguity assumption as above. */
void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *vbegin = (void *)vstart;
	void *vend = (void *)(vstart + length);

	dmac_inv_range(vbegin, vend);
	outer_inv_range(pstart, pstart + length);
}
| 108 | |
/* Human-readable names for the memtype indices; used in boot-time
 * diagnostics by adjust_reserve_sizes() and initialize_mempools().
 * NOTE(review): positional — order must track the MEMTYPE_* enum;
 * compare with the designated-initializer memtype_names[] table
 * later in this file. */
char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

/* Board-supplied reservation descriptor.  msm_reserve() dereferences
 * it unconditionally, so it must be assigned before reservation runs
 * — presumably by board/platform setup code; confirm with callers. */
struct reserve_info *reserve_info;
| 117 | |
Mitchel Humpherys | 29a62dd | 2012-10-03 16:43:28 -0700 | [diff] [blame] | 118 | /** |
| 119 | * calculate_reserve_limits() - calculate reserve limits for all |
| 120 | * memtypes |
| 121 | * |
| 122 | * for each memtype in the reserve_info->memtype_reserve_table, sets |
Mitchel Humpherys | a7f2ced | 2012-10-03 17:01:40 -0700 | [diff] [blame] | 123 | * the `limit' field to the largest size of any memblock of that |
Mitchel Humpherys | 29a62dd | 2012-10-03 16:43:28 -0700 | [diff] [blame] | 124 | * memtype. |
| 125 | */ |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 126 | static void __init calculate_reserve_limits(void) |
| 127 | { |
Mitchel Humpherys | a7f2ced | 2012-10-03 17:01:40 -0700 | [diff] [blame] | 128 | struct memblock_region *mr; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 129 | int memtype; |
| 130 | struct memtype_reserve *mt; |
| 131 | |
Mitchel Humpherys | a7f2ced | 2012-10-03 17:01:40 -0700 | [diff] [blame] | 132 | for_each_memblock(memory, mr) { |
| 133 | memtype = reserve_info->paddr_to_memtype(mr->base); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 134 | if (memtype == MEMTYPE_NONE) { |
Mitchel Humpherys | a7f2ced | 2012-10-03 17:01:40 -0700 | [diff] [blame] | 135 | pr_warning("unknown memory type for region at %lx\n", |
| 136 | (long unsigned int)mr->base); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 137 | continue; |
| 138 | } |
| 139 | mt = &reserve_info->memtype_reserve_table[memtype]; |
Mitchel Humpherys | a7f2ced | 2012-10-03 17:01:40 -0700 | [diff] [blame] | 140 | mt->limit = max_t(unsigned long, mt->limit, mr->size); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 141 | } |
| 142 | } |
| 143 | |
| 144 | static void __init adjust_reserve_sizes(void) |
| 145 | { |
| 146 | int i; |
| 147 | struct memtype_reserve *mt; |
| 148 | |
| 149 | mt = &reserve_info->memtype_reserve_table[0]; |
| 150 | for (i = 0; i < MEMTYPE_MAX; i++, mt++) { |
| 151 | if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN) |
| 152 | mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK; |
| 153 | if (mt->size > mt->limit) { |
Laura Abbott | 938d750 | 2013-04-09 10:44:16 -0700 | [diff] [blame] | 154 | pr_warning("%pa size for %s too large, setting to %pa\n", |
| 155 | &mt->size, memtype_name[i], &mt->limit); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 156 | mt->size = mt->limit; |
| 157 | } |
| 158 | } |
| 159 | } |
| 160 | |
| 161 | static void __init reserve_memory_for_mempools(void) |
| 162 | { |
Larry Bassel | 71237ba | 2013-04-02 10:55:31 -0700 | [diff] [blame] | 163 | int memtype; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 164 | struct memtype_reserve *mt; |
Larry Bassel | 71237ba | 2013-04-02 10:55:31 -0700 | [diff] [blame] | 165 | phys_addr_t alignment; |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 166 | |
| 167 | mt = &reserve_info->memtype_reserve_table[0]; |
| 168 | for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) { |
| 169 | if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size) |
| 170 | continue; |
Larry Bassel | 71237ba | 2013-04-02 10:55:31 -0700 | [diff] [blame] | 171 | alignment = (mt->flags & MEMTYPE_FLAGS_1M_ALIGN) ? |
| 172 | SZ_1M : PAGE_SIZE; |
| 173 | mt->start = arm_memblock_steal(mt->size, alignment); |
| 174 | BUG_ON(!mt->start); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 175 | } |
| 176 | } |
| 177 | |
| 178 | static void __init initialize_mempools(void) |
| 179 | { |
| 180 | struct mem_pool *mpool; |
| 181 | int memtype; |
| 182 | struct memtype_reserve *mt; |
| 183 | |
| 184 | mt = &reserve_info->memtype_reserve_table[0]; |
| 185 | for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) { |
| 186 | if (!mt->size) |
| 187 | continue; |
| 188 | mpool = initialize_memory_pool(mt->start, mt->size, memtype); |
| 189 | if (!mpool) |
| 190 | pr_warning("failed to create %s mempool\n", |
| 191 | memtype_name[memtype]); |
| 192 | } |
| 193 | } |
| 194 | |
Larry Bassel | 4d4f448 | 2012-04-04 11:26:09 -0700 | [diff] [blame] | 195 | #define MAX_FIXED_AREA_SIZE 0x11000000 |
| 196 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 197 | void __init msm_reserve(void) |
| 198 | { |
Larry Bassel | 2d8b42d | 2012-03-12 10:41:26 -0700 | [diff] [blame] | 199 | unsigned long msm_fixed_area_size; |
| 200 | unsigned long msm_fixed_area_start; |
| 201 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 202 | memory_pool_init(); |
Utsab Bose | 4bb9465 | 2012-09-28 15:07:35 +0530 | [diff] [blame] | 203 | if (reserve_info->calculate_reserve_sizes) |
| 204 | reserve_info->calculate_reserve_sizes(); |
Larry Bassel | 2d8b42d | 2012-03-12 10:41:26 -0700 | [diff] [blame] | 205 | |
| 206 | msm_fixed_area_size = reserve_info->fixed_area_size; |
| 207 | msm_fixed_area_start = reserve_info->fixed_area_start; |
| 208 | if (msm_fixed_area_size) |
Larry Bassel | 4d4f448 | 2012-04-04 11:26:09 -0700 | [diff] [blame] | 209 | if (msm_fixed_area_start > reserve_info->low_unstable_address |
| 210 | - MAX_FIXED_AREA_SIZE) |
| 211 | reserve_info->low_unstable_address = |
| 212 | msm_fixed_area_start; |
Larry Bassel | 2d8b42d | 2012-03-12 10:41:26 -0700 | [diff] [blame] | 213 | |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 214 | calculate_reserve_limits(); |
| 215 | adjust_reserve_sizes(); |
| 216 | reserve_memory_for_mempools(); |
| 217 | initialize_mempools(); |
| 218 | } |
| 219 | |
| 220 | static int get_ebi_memtype(void) |
| 221 | { |
| 222 | /* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */ |
| 223 | if (cpu_is_msm7x30() || cpu_is_msm8x55()) |
| 224 | return MEMTYPE_EBI0; |
| 225 | return MEMTYPE_EBI1; |
| 226 | } |
| 227 | |
/* Allocate (and map) physically contiguous memory from the EBI pool
 * appropriate for this SoC; thin wrapper over
 * allocate_contiguous_memory(). */
void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	int memtype = get_ebi_memtype();

	return allocate_contiguous_memory(size, memtype, align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);
| 235 | |
Laura Abbott | 771c304 | 2013-04-09 11:54:36 -0700 | [diff] [blame] | 236 | phys_addr_t allocate_contiguous_ebi_nomap(unsigned long size, |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 237 | unsigned long align) |
| 238 | { |
Jordan Crouse | 8c78b13 | 2011-05-26 10:27:47 -0600 | [diff] [blame] | 239 | return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(), |
| 240 | align, __builtin_return_address(0)); |
Bryan Huntsman | 3f2bc4d | 2011-08-16 17:27:22 -0700 | [diff] [blame] | 241 | } |
| 242 | EXPORT_SYMBOL(allocate_contiguous_ebi_nomap); |
| 243 | |
/* Snapshot of TTBR0 (CP15 c2, translation table base register 0),
 * captured by store_ttbr0() so post-mortem tools can locate the page
 * tables that were active at capture time. */
unsigned int msm_ttbr0;

/* Read TTBR0 via MRC and stash it in msm_ttbr0.  Read-only with
 * respect to CPU state; safe to call at any time. */
void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}
Laura Abbott | f637aff | 2011-12-14 14:16:17 -0800 | [diff] [blame] | 252 | |
/* Canonical memtype-name table, indexed by the MEMTYPE_* constants via
 * designated initializers (unlisted memtypes are NULL); consumed by
 * msm_get_memory_type_from_name() below. */
static char * const memtype_names[] = {
	[MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
	[MEMTYPE_SMI] = "SMI",
	[MEMTYPE_EBI0] = "EBI0",
	[MEMTYPE_EBI1] = "EBI1",
};
| 259 | |
Olav Haugan | 9286291 | 2012-08-01 11:32:48 -0700 | [diff] [blame] | 260 | int msm_get_memory_type_from_name(const char *memtype_name) |
Laura Abbott | d8d0f77 | 2012-07-10 10:27:06 -0700 | [diff] [blame] | 261 | { |
| 262 | int i; |
| 263 | |
| 264 | for (i = 0; i < ARRAY_SIZE(memtype_names); i++) { |
Olav Haugan | 9286291 | 2012-08-01 11:32:48 -0700 | [diff] [blame] | 265 | if (memtype_names[i] && |
| 266 | strcmp(memtype_name, memtype_names[i]) == 0) |
| 267 | return i; |
Laura Abbott | d8d0f77 | 2012-07-10 10:27:06 -0700 | [diff] [blame] | 268 | } |
| 269 | |
Olav Haugan | 9286291 | 2012-08-01 11:32:48 -0700 | [diff] [blame] | 270 | pr_err("Could not find memory type %s\n", memtype_name); |
Laura Abbott | d8d0f77 | 2012-07-10 10:27:06 -0700 | [diff] [blame] | 271 | return -EINVAL; |
| 272 | } |
| 273 | |
Olav Haugan | 9286291 | 2012-08-01 11:32:48 -0700 | [diff] [blame] | 274 | static int reserve_memory_type(const char *mem_name, |
| 275 | struct memtype_reserve *reserve_table, |
| 276 | int size) |
| 277 | { |
| 278 | int ret = msm_get_memory_type_from_name(mem_name); |
| 279 | |
| 280 | if (ret >= 0) { |
| 281 | reserve_table[ret].size += size; |
| 282 | ret = 0; |
| 283 | } |
| 284 | return ret; |
| 285 | } |
| 286 | |
Neeti Desai | 1b2cb55 | 2012-11-01 21:57:36 -0700 | [diff] [blame] | 287 | static int __init check_for_compat(unsigned long node) |
Laura Abbott | d8d0f77 | 2012-07-10 10:27:06 -0700 | [diff] [blame] | 288 | { |
| 289 | char **start = __compat_exports_start; |
| 290 | |
| 291 | for ( ; start < __compat_exports_end; start++) |
| 292 | if (of_flat_dt_is_compatible(node, *start)) |
| 293 | return 1; |
| 294 | |
| 295 | return 0; |
| 296 | } |
| 297 | |
/*
 * dt_scan_for_memory_reserve() - flattened-DT scan callback that
 * processes up to three memory properties on each compatible node:
 *
 *   qcom,memory-reservation-type + qcom,memory-reservation-size:
 *       add the size to the named memtype in the reserve table
 *       (`data' is the memtype_reserve table);
 *   qcom,memblock-remove: one or more <base size> pairs removed from
 *       memblock entirely;
 *   qcom,memblock-reserve: a single <base size> pair reserved in
 *       memblock.
 *
 * Each stage falls through to the next via labels so a malformed
 * property skips only its own stage.  Always returns 0 so the DT
 * walk continues over all nodes.
 */
int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
		int depth, void *data)
{
	char *memory_name_prop;
	unsigned int *memory_remove_prop;
	unsigned long memory_name_prop_length;
	unsigned long memory_remove_prop_length;
	unsigned long memory_size_prop_length;
	unsigned int *memory_size_prop;
	unsigned int *memory_reserve_prop;
	unsigned long memory_reserve_prop_length;
	unsigned int memory_size;
	unsigned int memory_start;
	unsigned int num_holes = 0;
	int i;
	int ret;

	memory_name_prop = of_get_flat_dt_prop(node,
				"qcom,memory-reservation-type",
				&memory_name_prop_length);
	memory_remove_prop = of_get_flat_dt_prop(node,
				"qcom,memblock-remove",
				&memory_remove_prop_length);

	memory_reserve_prop = of_get_flat_dt_prop(node,
				"qcom,memblock-reserve",
				&memory_reserve_prop_length);

	/* Only act on nodes that carry at least one of the properties
	 * AND match a compatible string exported via EXPORT_COMPAT. */
	if (memory_name_prop || memory_remove_prop || memory_reserve_prop) {
		if (!check_for_compat(node))
			goto out;
	} else {
		goto out;
	}

	/* Stage 1: named memtype reservation. */
	if (memory_name_prop) {
		if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
			WARN(1, "Memory name was malformed\n");
			goto mem_remove;
		}

		memory_size_prop = of_get_flat_dt_prop(node,
				"qcom,memory-reservation-size",
				&memory_size_prop_length);

		/* Size must be exactly one 32-bit cell. */
		if (memory_size_prop &&
		    (memory_size_prop_length == sizeof(unsigned int))) {
			memory_size = be32_to_cpu(*memory_size_prop);

			if (reserve_memory_type(memory_name_prop,
					data, memory_size) == 0)
				pr_info("%s reserved %s size %x\n",
					uname, memory_name_prop, memory_size);
			else
				WARN(1, "Node %s reserve failed\n",
						uname);
		} else {
			WARN(1, "Node %s specified bad/nonexistent size\n",
					uname);
		}
	}

mem_remove:

	/* Stage 2: remove <base size> pairs from memblock. */
	if (memory_remove_prop) {
		/* Property must be a non-empty whole number of pairs. */
		if (!memory_remove_prop_length || (memory_remove_prop_length %
			(2 * sizeof(unsigned int)) != 0)) {
			WARN(1, "Memory remove malformed\n");
			goto mem_reserve;
		}

		num_holes = memory_remove_prop_length /
					(2 * sizeof(unsigned int));

		for (i = 0; i < (num_holes * 2); i += 2) {
			memory_start = be32_to_cpu(memory_remove_prop[i]);
			memory_size = be32_to_cpu(memory_remove_prop[i+1]);

			ret = memblock_remove(memory_start, memory_size);
			if (ret)
				WARN(1, "Failed to remove memory %x-%x\n",
					memory_start, memory_start+memory_size);
			else
				pr_info("Node %s removed memory %x-%x\n", uname,
					memory_start, memory_start+memory_size);
		}
	}

mem_reserve:

	/* Stage 3: reserve a single <base size> pair in memblock. */
	if (memory_reserve_prop) {
		if (memory_reserve_prop_length != (2*sizeof(unsigned int))) {
			WARN(1, "Memory reserve malformed\n");
			goto out;
		}

		memory_start = be32_to_cpu(memory_reserve_prop[0]);
		memory_size = be32_to_cpu(memory_reserve_prop[1]);

		ret = memblock_reserve(memory_start, memory_size);
		if (ret)
			WARN(1, "Failed to reserve memory %x-%x\n",
				memory_start, memory_start+memory_size);
		else
			pr_info("Node %s memblock_reserve memory %x-%x\n",
				uname, memory_start, memory_start+memory_size);
	}

out:
	return 0;
}
Chintan Pandya | d71c5f9 | 2012-08-23 17:14:32 +0530 | [diff] [blame] | 409 | |
Neeti Desai | 5346431 | 2013-05-09 16:11:45 -0700 | [diff] [blame] | 410 | /* Function to remove any meminfo blocks which are of size zero */ |
| 411 | static void merge_meminfo(void) |
| 412 | { |
| 413 | int i = 0; |
| 414 | |
| 415 | while (i < meminfo.nr_banks) { |
| 416 | struct membank *bank = &meminfo.bank[i]; |
| 417 | |
| 418 | if (bank->size == 0) { |
| 419 | memmove(bank, bank + 1, |
| 420 | (meminfo.nr_banks - i) * sizeof(*bank)); |
| 421 | meminfo.nr_banks--; |
| 422 | continue; |
| 423 | } |
| 424 | i++; |
| 425 | } |
| 426 | } |
| 427 | |
| 428 | /* |
| 429 | * Function to scan the device tree and adjust the meminfo table to |
| 430 | * reflect the memory holes. |
| 431 | */ |
Neeti Desai | 1b2cb55 | 2012-11-01 21:57:36 -0700 | [diff] [blame] | 432 | int __init dt_scan_for_memory_hole(unsigned long node, const char *uname, |
| 433 | int depth, void *data) |
| 434 | { |
| 435 | unsigned int *memory_remove_prop; |
| 436 | unsigned long memory_remove_prop_length; |
| 437 | unsigned long hole_start; |
| 438 | unsigned long hole_size; |
Neeti Desai | a06e350 | 2013-07-15 16:47:35 -0700 | [diff] [blame] | 439 | unsigned int num_holes = 0; |
| 440 | int i = 0; |
Neeti Desai | 1b2cb55 | 2012-11-01 21:57:36 -0700 | [diff] [blame] | 441 | |
| 442 | memory_remove_prop = of_get_flat_dt_prop(node, |
| 443 | "qcom,memblock-remove", |
| 444 | &memory_remove_prop_length); |
| 445 | |
| 446 | if (memory_remove_prop) { |
| 447 | if (!check_for_compat(node)) |
| 448 | goto out; |
| 449 | } else { |
| 450 | goto out; |
| 451 | } |
| 452 | |
| 453 | if (memory_remove_prop) { |
Neeti Desai | a06e350 | 2013-07-15 16:47:35 -0700 | [diff] [blame] | 454 | if (!memory_remove_prop_length || (memory_remove_prop_length % |
| 455 | (2 * sizeof(unsigned int)) != 0)) { |
Neeti Desai | 1b2cb55 | 2012-11-01 21:57:36 -0700 | [diff] [blame] | 456 | WARN(1, "Memory remove malformed\n"); |
| 457 | goto out; |
| 458 | } |
| 459 | |
Neeti Desai | a06e350 | 2013-07-15 16:47:35 -0700 | [diff] [blame] | 460 | num_holes = memory_remove_prop_length / |
| 461 | (2 * sizeof(unsigned int)); |
Neeti Desai | 1b2cb55 | 2012-11-01 21:57:36 -0700 | [diff] [blame] | 462 | |
Neeti Desai | a06e350 | 2013-07-15 16:47:35 -0700 | [diff] [blame] | 463 | for (i = 0; i < (num_holes * 2); i += 2) { |
| 464 | hole_start = be32_to_cpu(memory_remove_prop[i]); |
| 465 | hole_size = be32_to_cpu(memory_remove_prop[i+1]); |
| 466 | |
| 467 | adjust_meminfo(hole_start, hole_size); |
| 468 | } |
Neeti Desai | 1b2cb55 | 2012-11-01 21:57:36 -0700 | [diff] [blame] | 469 | } |
| 470 | |
| 471 | out: |
| 472 | return 0; |
| 473 | } |
| 474 | |
/*
 * Split the memory bank to reflect the hole, if present,
 * using the start and end of the memory hole.
 *
 * For each bank that fully contains [start, start + size): the bank
 * array is shifted up one slot to open space, the original bank is
 * truncated to end at `start', and the new following bank covers
 * everything after the hole.  merge_meminfo() then discards either
 * half if the hole touched a bank edge and left it zero-sized.
 */
void adjust_meminfo(unsigned long start, unsigned long size)
{
	int i;

	for (i = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[i];

		/* Hole must lie entirely within this bank. */
		if (((start + size) <= (bank->start + bank->size)) &&
			(start >= bank->start)) {
			/* Duplicate this bank upward to make room for
			 * the post-hole half. */
			memmove(bank + 1, bank,
				(meminfo.nr_banks - i) * sizeof(*bank));
			meminfo.nr_banks++;
			/* Skip past the newly inserted bank. */
			i++;

			/* Pre-hole half: [bank->start, start). */
			bank->size = start - bank->start;
			/* Post-hole half starts after the hole; its size
			 * is the original size minus both other parts. */
			bank[1].start = (start + size);
			bank[1].size -= (bank->size + size);
			bank[1].highmem = 0;
			/* Drop any zero-sized half just created. */
			merge_meminfo();
		}
	}
}
Larry Bassel | 38e22da | 2013-02-25 10:54:16 -0800 | [diff] [blame] | 501 | |
Chintan Pandya | d71c5f9 | 2012-08-23 17:14:32 +0530 | [diff] [blame] | 502 | unsigned long get_ddr_size(void) |
| 503 | { |
| 504 | unsigned int i; |
| 505 | unsigned long ret = 0; |
| 506 | |
| 507 | for (i = 0; i < meminfo.nr_banks; i++) |
| 508 | ret += meminfo.bank[i].size; |
| 509 | |
| 510 | return ret; |
| 511 | } |
Mitchel Humpherys | 6ae3ae4 | 2012-10-30 15:12:52 -0700 | [diff] [blame] | 512 | |
/* Provide a string that anonymous device tree allocations (those not
 * directly associated with any driver) can use for their "compatible"
 * field; matched against nodes by check_for_compat() above. */
EXPORT_COMPAT("qcom,msm-contig-mem");