blob: a974018e27b0b8d25b967622ed0837cd430be09f [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/memory.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Larry Bassel38e22da2013-02-25 10:54:16 -08004 * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/mm.h>
18#include <linux/mm_types.h>
19#include <linux/bootmem.h>
20#include <linux/module.h>
21#include <linux/memory_alloc.h>
22#include <linux/memblock.h>
Larry Bassel71237ba2013-04-02 10:55:31 -070023#include <asm/memblock.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070024#include <asm/pgtable.h>
25#include <asm/io.h>
26#include <asm/mach/map.h>
27#include <asm/cacheflush.h>
28#include <asm/setup.h>
29#include <asm/mach-types.h>
30#include <mach/msm_memtypes.h>
Neeti Desai1b2cb552012-11-01 21:57:36 -070031#include <mach/memory.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070032#include <linux/hardirq.h>
33#if defined(CONFIG_MSM_NPA_REMOTE)
34#include "npa_remote.h"
35#include <linux/completion.h>
36#include <linux/err.h>
37#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070038#include <mach/msm_iomap.h>
39#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070040#include <linux/sched.h>
Laura Abbottd8d0f772012-07-10 10:27:06 -070041#include <linux/of_fdt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070042
43/* fixme */
44#include <asm/tlbflush.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045#include <../../mm/mm.h>
46
#if defined(CONFIG_ARCH_MSM7X27)
/* Backing store for the strongly-ordered page; over-sized so that a
 * page-aligned region can always be carved out of it.
 */
static void *strongly_ordered_page;
static char strongly_ordered_mem[PAGE_SIZE*2-4];

/**
 * map_page_strongly_ordered() - map one page with strongly-ordered
 * attributes (MT_MEMORY_SO) at the fixed virtual address
 * MSM_STRONGLY_ORDERED_PAGE.
 *
 * Idempotent: returns immediately once the page has been mapped.
 */
void __init map_page_strongly_ordered(void)
{
	unsigned long phys;
	struct map_desc map[1];

	if (strongly_ordered_page)
		return;

	/* Carve a page-aligned region out of the over-sized buffer.
	 * Cast through unsigned long rather than int so the address is
	 * never truncated when pointers are wider than int.
	 */
	strongly_ordered_page = (void *)PFN_ALIGN(
				(unsigned long)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map[0].pfn = __phys_to_pfn(phys);
	map[0].virtual = MSM_STRONGLY_ORDERED_PAGE;
	map[0].length = PAGE_SIZE;
	map[0].type = MT_MEMORY_SO;
	iotable_init(map, ARRAY_SIZE(map));

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
}
#else
void map_page_strongly_ordered(void) { }
#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073
#if defined(CONFIG_ARCH_MSM7X27)
/* Perform a dummy store through the strongly-ordered mapping set up by
 * map_page_strongly_ordered().  NOTE(review): presumably used by callers
 * as a memory-drain/barrier via the strongly-ordered attribute — confirm
 * against call sites; the store faults if the page was never mapped.
 */
void write_to_strongly_ordered_memory(void)
{
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
}
#else
/* No-op on targets that do not need the strongly-ordered write. */
void write_to_strongly_ordered_memory(void) { }
#endif
EXPORT_SYMBOL(write_to_strongly_ordered_memory);
83
/* Clean and invalidate all caches (L1 by virtual range, outer cache by
 * physical range, if present) for the given region.  Assumes the
 * associated physical memory is contiguous.
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *vbegin = (void *)vstart;
	void *vend = (void *)(vstart + length);
	unsigned long pend = pstart + length;

	dmac_flush_range(vbegin, vend);
	outer_flush_range(pstart, pend);
}
94
/* Clean (write back without invalidating) all caches for the region.
 * Assumes the associated physical memory is contiguous.
 */
void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *vbegin = (void *)vstart;
	void *vend = (void *)(vstart + length);
	unsigned long pend = pstart + length;

	dmac_clean_range(vbegin, vend);
	outer_clean_range(pstart, pend);
}
101
/* Invalidate (discard without writing back) all caches for the region.
 * Assumes the associated physical memory is contiguous.
 */
void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *vbegin = (void *)vstart;
	void *vend = (void *)(vstart + length);
	unsigned long pend = pstart + length;

	dmac_inv_range(vbegin, vend);
	outer_inv_range(pstart, pend);
}
108
/* Human-readable names for the MEMTYPE_* indices, used in the warning
 * messages below.  NOTE(review): partially duplicates the designated
 * memtype_names[] table further down — consider consolidating.
 */
char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

/* Set up elsewhere (board/SoC code); consumed by the reservation
 * routines below.
 */
struct reserve_info *reserve_info;
117
Mitchel Humpherys29a62dd2012-10-03 16:43:28 -0700118/**
119 * calculate_reserve_limits() - calculate reserve limits for all
120 * memtypes
121 *
122 * for each memtype in the reserve_info->memtype_reserve_table, sets
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700123 * the `limit' field to the largest size of any memblock of that
Mitchel Humpherys29a62dd2012-10-03 16:43:28 -0700124 * memtype.
125 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126static void __init calculate_reserve_limits(void)
127{
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700128 struct memblock_region *mr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700129 int memtype;
130 struct memtype_reserve *mt;
131
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700132 for_each_memblock(memory, mr) {
133 memtype = reserve_info->paddr_to_memtype(mr->base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700134 if (memtype == MEMTYPE_NONE) {
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700135 pr_warning("unknown memory type for region at %lx\n",
136 (long unsigned int)mr->base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700137 continue;
138 }
139 mt = &reserve_info->memtype_reserve_table[memtype];
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700140 mt->limit = max_t(unsigned long, mt->limit, mr->size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700141 }
142}
143
144static void __init adjust_reserve_sizes(void)
145{
146 int i;
147 struct memtype_reserve *mt;
148
149 mt = &reserve_info->memtype_reserve_table[0];
150 for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
151 if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
152 mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
153 if (mt->size > mt->limit) {
Laura Abbott938d7502013-04-09 10:44:16 -0700154 pr_warning("%pa size for %s too large, setting to %pa\n",
155 &mt->size, memtype_name[i], &mt->limit);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700156 mt->size = mt->limit;
157 }
158 }
159}
160
161static void __init reserve_memory_for_mempools(void)
162{
Larry Bassel71237ba2013-04-02 10:55:31 -0700163 int memtype;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700164 struct memtype_reserve *mt;
Larry Bassel71237ba2013-04-02 10:55:31 -0700165 phys_addr_t alignment;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700166
167 mt = &reserve_info->memtype_reserve_table[0];
168 for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
169 if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
170 continue;
Larry Bassel71237ba2013-04-02 10:55:31 -0700171 alignment = (mt->flags & MEMTYPE_FLAGS_1M_ALIGN) ?
172 SZ_1M : PAGE_SIZE;
173 mt->start = arm_memblock_steal(mt->size, alignment);
174 BUG_ON(!mt->start);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700175 }
176}
177
178static void __init initialize_mempools(void)
179{
180 struct mem_pool *mpool;
181 int memtype;
182 struct memtype_reserve *mt;
183
184 mt = &reserve_info->memtype_reserve_table[0];
185 for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
186 if (!mt->size)
187 continue;
188 mpool = initialize_memory_pool(mt->start, mt->size, memtype);
189 if (!mpool)
190 pr_warning("failed to create %s mempool\n",
191 memtype_name[memtype]);
192 }
193}
194
/* Upper bound (bytes) assumed for the fixed area when deciding whether
 * to pull low_unstable_address down in msm_reserve().
 */
#define MAX_FIXED_AREA_SIZE 0x11000000
196
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700197void __init msm_reserve(void)
198{
Larry Bassel2d8b42d2012-03-12 10:41:26 -0700199 unsigned long msm_fixed_area_size;
200 unsigned long msm_fixed_area_start;
201
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700202 memory_pool_init();
Utsab Bose4bb94652012-09-28 15:07:35 +0530203 if (reserve_info->calculate_reserve_sizes)
204 reserve_info->calculate_reserve_sizes();
Larry Bassel2d8b42d2012-03-12 10:41:26 -0700205
206 msm_fixed_area_size = reserve_info->fixed_area_size;
207 msm_fixed_area_start = reserve_info->fixed_area_start;
208 if (msm_fixed_area_size)
Larry Bassel4d4f4482012-04-04 11:26:09 -0700209 if (msm_fixed_area_start > reserve_info->low_unstable_address
210 - MAX_FIXED_AREA_SIZE)
211 reserve_info->low_unstable_address =
212 msm_fixed_area_start;
Larry Bassel2d8b42d2012-03-12 10:41:26 -0700213
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700214 calculate_reserve_limits();
215 adjust_reserve_sizes();
216 reserve_memory_for_mempools();
217 initialize_mempools();
218}
219
220static int get_ebi_memtype(void)
221{
222 /* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
223 if (cpu_is_msm7x30() || cpu_is_msm8x55())
224 return MEMTYPE_EBI0;
225 return MEMTYPE_EBI1;
226}
227
/* Allocate mapped contiguous memory from the SoC-appropriate EBI pool. */
void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	int memtype = get_ebi_memtype();

	return allocate_contiguous_memory(size, memtype, align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);
235
Laura Abbott771c3042013-04-09 11:54:36 -0700236phys_addr_t allocate_contiguous_ebi_nomap(unsigned long size,
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700237 unsigned long align)
238{
Jordan Crouse8c78b132011-05-26 10:27:47 -0600239 return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
240 align, __builtin_return_address(0));
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700241}
242EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);
243
/* Snapshot of the CP15 TTBR0 (translation table base) register, kept
 * for post-mortem debugging.
 */
unsigned int msm_ttbr0;

/* Read TTBR0 via CP15 and save it in msm_ttbr0. */
void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}
Laura Abbottf637aff2011-12-14 14:16:17 -0800252
/* Device-tree facing names for the MEMTYPE_* indices, indexed by
 * memtype value; any gaps in the enum are implicitly NULL.
 */
static char * const memtype_names[] = {
	[MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
	[MEMTYPE_SMI] = "SMI",
	[MEMTYPE_EBI0] = "EBI0",
	[MEMTYPE_EBI1] = "EBI1",
};
259
Olav Haugan92862912012-08-01 11:32:48 -0700260int msm_get_memory_type_from_name(const char *memtype_name)
Laura Abbottd8d0f772012-07-10 10:27:06 -0700261{
262 int i;
263
264 for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
Olav Haugan92862912012-08-01 11:32:48 -0700265 if (memtype_names[i] &&
266 strcmp(memtype_name, memtype_names[i]) == 0)
267 return i;
Laura Abbottd8d0f772012-07-10 10:27:06 -0700268 }
269
Olav Haugan92862912012-08-01 11:32:48 -0700270 pr_err("Could not find memory type %s\n", memtype_name);
Laura Abbottd8d0f772012-07-10 10:27:06 -0700271 return -EINVAL;
272}
273
Olav Haugan92862912012-08-01 11:32:48 -0700274static int reserve_memory_type(const char *mem_name,
275 struct memtype_reserve *reserve_table,
276 int size)
277{
278 int ret = msm_get_memory_type_from_name(mem_name);
279
280 if (ret >= 0) {
281 reserve_table[ret].size += size;
282 ret = 0;
283 }
284 return ret;
285}
286
Neeti Desai1b2cb552012-11-01 21:57:36 -0700287static int __init check_for_compat(unsigned long node)
Laura Abbottd8d0f772012-07-10 10:27:06 -0700288{
289 char **start = __compat_exports_start;
290
291 for ( ; start < __compat_exports_end; start++)
292 if (of_flat_dt_is_compatible(node, *start))
293 return 1;
294
295 return 0;
296}
297
/* Flat-device-tree scan callback: for every node that matches an
 * EXPORT_COMPAT() compatible string, process up to three properties:
 *  - "qcom,memory-reservation-type"/-size: add to the memtype reserve
 *    table passed in via `data';
 *  - "qcom,memblock-remove": remove (base, size) pairs from memblock;
 *  - "qcom,memblock-reserve": reserve one (base, size) pair.
 * Always returns 0 so of_scan_flat_dt() keeps walking the tree.
 */
int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
		int depth, void *data)
{
	char *memory_name_prop;
	unsigned int *memory_remove_prop;
	unsigned long memory_name_prop_length;
	unsigned long memory_remove_prop_length;
	unsigned long memory_size_prop_length;
	unsigned int *memory_size_prop;
	unsigned int *memory_reserve_prop;
	unsigned long memory_reserve_prop_length;
	unsigned int memory_size;
	unsigned int memory_start;
	unsigned int num_holes = 0;
	int i;
	int ret;

	memory_name_prop = of_get_flat_dt_prop(node,
				"qcom,memory-reservation-type",
				&memory_name_prop_length);
	memory_remove_prop = of_get_flat_dt_prop(node,
				"qcom,memblock-remove",
				&memory_remove_prop_length);

	memory_reserve_prop = of_get_flat_dt_prop(node,
				"qcom,memblock-reserve",
				&memory_reserve_prop_length);

	/* Only act on nodes that carry at least one of the properties AND
	 * match a compatible string from the compat-exports table.
	 */
	if (memory_name_prop || memory_remove_prop || memory_reserve_prop) {
		if (!check_for_compat(node))
			goto out;
	} else {
		goto out;
	}

	/* Stage 1: named reservation into the memtype reserve table. */
	if (memory_name_prop) {
		if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
			WARN(1, "Memory name was malformed\n");
			goto mem_remove;
		}

		memory_size_prop = of_get_flat_dt_prop(node,
					"qcom,memory-reservation-size",
					&memory_size_prop_length);

		/* Size must be exactly one 32-bit cell. */
		if (memory_size_prop &&
			(memory_size_prop_length == sizeof(unsigned int))) {
			memory_size = be32_to_cpu(*memory_size_prop);

			if (reserve_memory_type(memory_name_prop,
					data, memory_size) == 0)
				pr_info("%s reserved %s size %x\n",
					uname, memory_name_prop, memory_size);
			else
				WARN(1, "Node %s reserve failed\n",
					uname);
		} else {
			WARN(1, "Node %s specified bad/nonexistent size\n",
				uname);
		}
	}

mem_remove:

	/* Stage 2: carve holes out of memblock.  The property is a list
	 * of (base, size) 32-bit cell pairs.
	 */
	if (memory_remove_prop) {
		if (!memory_remove_prop_length || (memory_remove_prop_length %
			(2 * sizeof(unsigned int)) != 0)) {
			WARN(1, "Memory remove malformed\n");
			goto mem_reserve;
		}

		num_holes = memory_remove_prop_length /
			(2 * sizeof(unsigned int));

		for (i = 0; i < (num_holes * 2); i += 2) {
			memory_start = be32_to_cpu(memory_remove_prop[i]);
			memory_size = be32_to_cpu(memory_remove_prop[i+1]);

			ret = memblock_remove(memory_start, memory_size);
			if (ret)
				WARN(1, "Failed to remove memory %x-%x\n",
					memory_start, memory_start+memory_size);
			else
				pr_info("Node %s removed memory %x-%x\n", uname,
					memory_start, memory_start+memory_size);
		}
	}

mem_reserve:

	/* Stage 3: a single memblock reservation — exactly one
	 * (base, size) pair is accepted.
	 */
	if (memory_reserve_prop) {
		if (memory_reserve_prop_length != (2*sizeof(unsigned int))) {
			WARN(1, "Memory reserve malformed\n");
			goto out;
		}

		memory_start = be32_to_cpu(memory_reserve_prop[0]);
		memory_size = be32_to_cpu(memory_reserve_prop[1]);

		ret = memblock_reserve(memory_start, memory_size);
		if (ret)
			WARN(1, "Failed to reserve memory %x-%x\n",
				memory_start, memory_start+memory_size);
		else
			pr_info("Node %s memblock_reserve memory %x-%x\n",
				uname, memory_start, memory_start+memory_size);
	}

out:
	return 0;
}
Chintan Pandyad71c5f92012-08-23 17:14:32 +0530409
Neeti Desai53464312013-05-09 16:11:45 -0700410/* Function to remove any meminfo blocks which are of size zero */
411static void merge_meminfo(void)
412{
413 int i = 0;
414
415 while (i < meminfo.nr_banks) {
416 struct membank *bank = &meminfo.bank[i];
417
418 if (bank->size == 0) {
419 memmove(bank, bank + 1,
420 (meminfo.nr_banks - i) * sizeof(*bank));
421 meminfo.nr_banks--;
422 continue;
423 }
424 i++;
425 }
426}
427
428/*
429 * Function to scan the device tree and adjust the meminfo table to
430 * reflect the memory holes.
431 */
Neeti Desai1b2cb552012-11-01 21:57:36 -0700432int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
433 int depth, void *data)
434{
435 unsigned int *memory_remove_prop;
436 unsigned long memory_remove_prop_length;
437 unsigned long hole_start;
438 unsigned long hole_size;
Neeti Desaia06e3502013-07-15 16:47:35 -0700439 unsigned int num_holes = 0;
440 int i = 0;
Neeti Desai1b2cb552012-11-01 21:57:36 -0700441
442 memory_remove_prop = of_get_flat_dt_prop(node,
443 "qcom,memblock-remove",
444 &memory_remove_prop_length);
445
446 if (memory_remove_prop) {
447 if (!check_for_compat(node))
448 goto out;
449 } else {
450 goto out;
451 }
452
453 if (memory_remove_prop) {
Neeti Desaia06e3502013-07-15 16:47:35 -0700454 if (!memory_remove_prop_length || (memory_remove_prop_length %
455 (2 * sizeof(unsigned int)) != 0)) {
Neeti Desai1b2cb552012-11-01 21:57:36 -0700456 WARN(1, "Memory remove malformed\n");
457 goto out;
458 }
459
Neeti Desaia06e3502013-07-15 16:47:35 -0700460 num_holes = memory_remove_prop_length /
461 (2 * sizeof(unsigned int));
Neeti Desai1b2cb552012-11-01 21:57:36 -0700462
Neeti Desaia06e3502013-07-15 16:47:35 -0700463 for (i = 0; i < (num_holes * 2); i += 2) {
464 hole_start = be32_to_cpu(memory_remove_prop[i]);
465 hole_size = be32_to_cpu(memory_remove_prop[i+1]);
466
467 adjust_meminfo(hole_start, hole_size);
468 }
Neeti Desai1b2cb552012-11-01 21:57:36 -0700469 }
470
471out:
472 return 0;
473}
474
/*
 * Split the memory bank to reflect the hole, if present,
 * using the start and end of the memory hole.
 *
 * When [start, start+size) lies entirely within bank i, the bank is
 * duplicated into slot i+1, slot i is trimmed to end at the hole, and
 * slot i+1 is trimmed to begin after it.  merge_meminfo() then drops
 * any zero-size fragment (hole flush with a bank edge).
 * NOTE(review): nr_banks grows without a check against the bank array's
 * capacity (NR_BANKS) — confirm callers cannot overflow it.
 */
void adjust_meminfo(unsigned long start, unsigned long size)
{
	int i;

	for (i = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[i];

		/* Hole fully contained in this bank? */
		if (((start + size) <= (bank->start + bank->size)) &&
			(start >= bank->start)) {
			/* Shift banks i..nr_banks-1 up one slot, leaving a
			 * duplicate of bank i in slot i+1.
			 */
			memmove(bank + 1, bank,
				(meminfo.nr_banks - i) * sizeof(*bank));
			meminfo.nr_banks++;
			/* Skip the newly created bank on the next pass. */
			i++;

			/* Trim: bank i ends at the hole, bank i+1 starts
			 * after it and loses the hole plus the low part.
			 */
			bank->size = start - bank->start;
			bank[1].start = (start + size);
			bank[1].size -= (bank->size + size);
			bank[1].highmem = 0;
			merge_meminfo();
		}
	}
}
Larry Bassel38e22da2013-02-25 10:54:16 -0800501
Chintan Pandyad71c5f92012-08-23 17:14:32 +0530502unsigned long get_ddr_size(void)
503{
504 unsigned int i;
505 unsigned long ret = 0;
506
507 for (i = 0; i < meminfo.nr_banks; i++)
508 ret += meminfo.bank[i].size;
509
510 return ret;
511}
Mitchel Humpherys6ae3ae42012-10-30 15:12:52 -0700512
/* Provide a string that anonymous device tree allocations (those not
 * directly associated with any driver) can use for their "compatible"
 * field; check_for_compat() matches nodes against this table. */
EXPORT_COMPAT("qcom,msm-contig-mem");