/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2012, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <mach/memory.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <linux/sched.h>
#include <linux/of_fdt.h>

/* fixme */
#include <asm/tlbflush.h>
#include <../../mm/mm.h>
#include <linux/fmem.h>

#if defined(CONFIG_ARCH_MSM7X27)
static void *strongly_ordered_page;
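/* Sized at just under two pages so that PFN_ALIGN() below can always
 * find one fully page-aligned page inside the array, wherever the
 * linker places it (assuming at least word alignment).
 */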
static char strongly_ordered_mem[PAGE_SIZE*2-4];

void __init map_page_strongly_ordered(void)
{
        unsigned long phys;
        struct map_desc map[1];

        if (strongly_ordered_page)
                return;

        strongly_ordered_page = (void *)PFN_ALIGN((int)&strongly_ordered_mem);
        phys = __pa(strongly_ordered_page);

        map[0].pfn = __phys_to_pfn(phys);
        map[0].virtual = MSM_STRONGLY_ORDERED_PAGE;
        map[0].length = PAGE_SIZE;
        map[0].type = MT_MEMORY_SO;
        iotable_init(map, ARRAY_SIZE(map));

        printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
}
#else
void map_page_strongly_ordered(void) { }
#endif

#if defined(CONFIG_ARCH_MSM7X27)
void write_to_strongly_ordered_memory(void)
{
        *(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
}
#else
void write_to_strongly_ordered_memory(void) { }
#endif
EXPORT_SYMBOL(write_to_strongly_ordered_memory);
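
/*
 * Illustrative use, on the assumption (not stated in this file) that a
 * store to strongly-ordered memory does not complete until earlier
 * buffered writes have drained, making this a heavyweight barrier:
 *
 *	writel(val, chip_reg);
 *	write_to_strongly_ordered_memory();
 */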

/* These cache-related routines assume (where an outer cache is present)
 * that the associated physical memory is contiguous. They operate on
 * all caches: L1, and L2 if present.
 */
void clean_and_invalidate_caches(unsigned long vstart,
        unsigned long length, unsigned long pstart)
{
        dmac_flush_range((void *)vstart, (void *)(vstart + length));
        outer_flush_range(pstart, pstart + length);
}

void clean_caches(unsigned long vstart,
        unsigned long length, unsigned long pstart)
{
        dmac_clean_range((void *)vstart, (void *)(vstart + length));
        outer_clean_range(pstart, pstart + length);
}

void invalidate_caches(unsigned long vstart,
        unsigned long length, unsigned long pstart)
{
        dmac_inv_range((void *)vstart, (void *)(vstart + length));
        outer_inv_range(pstart, pstart + length);
}
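
/*
 * Sketch of a typical DMA sequence built on these helpers (illustrative;
 * vstart must map the physically contiguous region starting at pstart):
 *
 *	clean_caches(vstart, len, pstart);       before the device reads
 *	 ... device DMA in either direction ...
 *	invalidate_caches(vstart, len, pstart);  after the device writes
 */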

void * __init alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
        void *unused_addr = NULL;
        unsigned long addr, tmp_size, unused_size;

        /* Allocate the maximum size needed and see where it ends up,
         * then free it. In this path there are no other allocators, so
         * we can depend on getting the same address back when we
         * re-allocate: first a smaller padding piece so that the next
         * allocation starts on the alignment boundary (if necessary),
         * then the piece we really want, after which the unused padding
         * piece is freed.
         */

        tmp_size = size + alignment - PAGE_SIZE;
        addr = (unsigned long)alloc_bootmem(tmp_size);
        free_bootmem(__pa(addr), tmp_size);

        unused_size = alignment - (addr % alignment);
        if (unused_size)
                unused_addr = alloc_bootmem(unused_size);

        addr = (unsigned long)alloc_bootmem(size);
        if (unused_size)
                free_bootmem(__pa(unused_addr), unused_size);

        return (void *)addr;
}
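
/*
 * Worked example with made-up numbers (4 KB pages): a request for
 * size = 0x5000 at alignment = 0x4000 first probes with tmp_size =
 * 0x5000 + 0x4000 - 0x1000 = 0x8000. If the probe returns 0x1000, the
 * padding piece is 0x4000 - (0x1000 % 0x4000) = 0x3000 bytes, so the
 * real allocation lands at the aligned address 0x4000 and the padding
 * at 0x1000 is then freed.
 */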

int (*change_memory_power)(u64, u64, int);

int platform_physical_remove_pages(u64 start, u64 size)
{
        if (!change_memory_power)
                return 0;
        return change_memory_power(start, size, MEMORY_DEEP_POWERDOWN);
}

int platform_physical_active_pages(u64 start, u64 size)
{
        if (!change_memory_power)
                return 0;
        return change_memory_power(start, size, MEMORY_ACTIVE);
}

int platform_physical_low_power_pages(u64 start, u64 size)
{
        if (!change_memory_power)
                return 0;
        return change_memory_power(start, size, MEMORY_SELF_REFRESH);
}
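
/*
 * A memory low-power driver opts in by installing the callback; a
 * hypothetical sketch (my_change_memory_power is not from this file):
 *
 *	static int my_change_memory_power(u64 start, u64 size, int state)
 *	{
 *		... program the DDR controller for 'state' ...
 *	}
 *
 *	change_memory_power = my_change_memory_power;
 */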

char *memtype_name[] = {
        "SMI_KERNEL",
        "SMI",
        "EBI0",
        "EBI1"
};

struct reserve_info *reserve_info;

/**
 * calculate_reserve_limits() - calculate reserve limits for all
 * memtypes
 *
 * For each memtype in the reserve_info->memtype_reserve_table, sets
 * the `limit' field to the largest size of any memblock of that
 * memtype.
 */
static void __init calculate_reserve_limits(void)
{
        struct memblock_region *mr;
        int memtype;
        struct memtype_reserve *mt;

        for_each_memblock(memory, mr) {
                memtype = reserve_info->paddr_to_memtype(mr->base);
                if (memtype == MEMTYPE_NONE) {
                        pr_warning("unknown memory type for region at %lx\n",
                                (unsigned long)mr->base);
                        continue;
                }
                mt = &reserve_info->memtype_reserve_table[memtype];
                mt->limit = max_t(unsigned long, mt->limit, mr->size);
        }
}

static void __init adjust_reserve_sizes(void)
{
        int i;
        struct memtype_reserve *mt;

        mt = &reserve_info->memtype_reserve_table[0];
        for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
                if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
                        mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
                if (mt->size > mt->limit) {
                        pr_warning("%lx size for %s too large, setting to %lx\n",
                                mt->size, memtype_name[i], mt->limit);
                        mt->size = mt->limit;
                }
        }
}

static void __init reserve_memory_for_mempools(void)
{
        int memtype, memreg_type;
        struct memtype_reserve *mt;
        struct memblock_region *mr, *mr_candidate;
        int ret;

        mt = &reserve_info->memtype_reserve_table[0];
        for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
                if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
                        continue;

                /* Reset the candidate for each memtype so a match left
                 * over from a previous iteration cannot leak into this
                 * one.
                 */
                mr_candidate = NULL;

                /* Choose the memory block with the highest physical
                 * address which is large enough, so that we will not
                 * take memory from the lowest memory bank which the kernel
                 * is in (and cause boot problems) and so that we might
                 * be able to steal memory that would otherwise become
                 * highmem.
                 */
                for_each_memblock(memory, mr) {
                        memreg_type =
                                reserve_info->paddr_to_memtype(mr->base);
                        if (memtype != memreg_type)
                                continue;
                        if (mr->size >= mt->size
                                && (mr_candidate == NULL
                                        || mr->base > mr_candidate->base))
                                mr_candidate = mr;
                }
                BUG_ON(mr_candidate == NULL);
                /* bump mt up against the top of the region */
                mt->start = mr_candidate->base + mr_candidate->size - mt->size;
                ret = memblock_remove(mt->start, mt->size);
                BUG_ON(ret);
        }
}
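
/*
 * Example with made-up numbers: given EBI1 memblocks at 0x40000000
 * (128 MB) and 0x80000000 (256 MB) and an EBI1 reserve of 16 MB, the
 * block at 0x80000000 wins (highest base that is large enough) and the
 * pool is carved from its top:
 * 0x80000000 + 0x10000000 - 0x01000000 = 0x8f000000.
 */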

static void __init initialize_mempools(void)
{
        struct mem_pool *mpool;
        int memtype;
        struct memtype_reserve *mt;

        mt = &reserve_info->memtype_reserve_table[0];
        for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
                if (!mt->size)
                        continue;
                mpool = initialize_memory_pool(mt->start, mt->size, memtype);
                if (!mpool)
                        pr_warning("failed to create %s mempool\n",
                                memtype_name[memtype]);
        }
}

#define MAX_FIXED_AREA_SIZE 0x11000000

void __init msm_reserve(void)
{
        unsigned long msm_fixed_area_size;
        unsigned long msm_fixed_area_start;

        memory_pool_init();
        if (reserve_info->calculate_reserve_sizes)
                reserve_info->calculate_reserve_sizes();

        msm_fixed_area_size = reserve_info->fixed_area_size;
        msm_fixed_area_start = reserve_info->fixed_area_start;
        if (msm_fixed_area_size)
                if (msm_fixed_area_start > reserve_info->low_unstable_address
                        - MAX_FIXED_AREA_SIZE)
                        reserve_info->low_unstable_address =
                                msm_fixed_area_start;

        calculate_reserve_limits();
        adjust_reserve_sizes();
        reserve_memory_for_mempools();
        initialize_mempools();
}

static int get_ebi_memtype(void)
{
        /* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
        if (cpu_is_msm7x30() || cpu_is_msm8x55())
                return MEMTYPE_EBI0;
        return MEMTYPE_EBI1;
}

void *allocate_contiguous_ebi(unsigned long size,
        unsigned long align, int cached)
{
        return allocate_contiguous_memory(size, get_ebi_memtype(),
                align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
        unsigned long align)
{
        return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
                align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);
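
/*
 * Hedged usage sketch (sizes illustrative, SZ_* from <asm/sizes.h>):
 * the first form returns a kernel mapping, the second only a physical
 * address for callers that create their own mapping:
 *
 *	void *va = allocate_contiguous_ebi(SZ_1M, SZ_4K, 0);
 *	unsigned long pa = allocate_contiguous_ebi_nomap(SZ_64K, SZ_4K);
 */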

unsigned int msm_ttbr0;

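/* "mrc p15, 0, <Rt>, c2, c0, 0" is the CP15 encoding that reads TTBR0,
 * the ARMv7 translation table base register, into a core register.
 */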
void store_ttbr0(void)
{
        /* Store TTBR0 for post-mortem debugging purposes. */
        asm("mrc p15, 0, %0, c2, c0, 0\n"
                : "=r" (msm_ttbr0));
}

int request_fmem_c_region(void *unused)
{
        return fmem_set_state(FMEM_C_STATE);
}

int release_fmem_c_region(void *unused)
{
        return fmem_set_state(FMEM_T_STATE);
}

static char * const memtype_names[] = {
        [MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
        [MEMTYPE_SMI] = "SMI",
        [MEMTYPE_EBI0] = "EBI0",
        [MEMTYPE_EBI1] = "EBI1",
};

int msm_get_memory_type_from_name(const char *memtype_name)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
                if (memtype_names[i] &&
                        strcmp(memtype_name, memtype_names[i]) == 0)
                        return i;
        }

        pr_err("Could not find memory type %s\n", memtype_name);
        return -EINVAL;
}

static int reserve_memory_type(const char *mem_name,
        struct memtype_reserve *reserve_table,
        int size)
{
        int ret = msm_get_memory_type_from_name(mem_name);

        if (ret >= 0) {
                reserve_table[ret].size += size;
                ret = 0;
        }
        return ret;
}

static int __init check_for_compat(unsigned long node)
{
        char **start = __compat_exports_start;

        for ( ; start < __compat_exports_end; start++)
                if (of_flat_dt_is_compatible(node, *start))
                        return 1;

        return 0;
}

int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
        int depth, void *data)
{
        char *memory_name_prop;
        unsigned int *memory_remove_prop;
        unsigned long memory_name_prop_length;
        unsigned long memory_remove_prop_length;
        unsigned long memory_size_prop_length;
        unsigned int *memory_size_prop;
        unsigned int memory_size;
        unsigned int memory_start;
        int ret;

        memory_name_prop = of_get_flat_dt_prop(node,
                "qcom,memory-reservation-type",
                &memory_name_prop_length);
        memory_remove_prop = of_get_flat_dt_prop(node,
                "qcom,memblock-remove",
                &memory_remove_prop_length);

        if (memory_name_prop || memory_remove_prop) {
                if (!check_for_compat(node))
                        goto out;
        } else {
                goto out;
        }

        if (memory_name_prop) {
                if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
                        WARN(1, "Memory name was malformed\n");
                        goto mem_remove;
                }

                memory_size_prop = of_get_flat_dt_prop(node,
                        "qcom,memory-reservation-size",
                        &memory_size_prop_length);

                if (memory_size_prop &&
                        (memory_size_prop_length == sizeof(unsigned int))) {
                        memory_size = be32_to_cpu(*memory_size_prop);

                        if (reserve_memory_type(memory_name_prop,
                                data, memory_size) == 0)
                                pr_info("%s reserved %s size %x\n",
                                        uname, memory_name_prop, memory_size);
                        else
                                WARN(1, "Node %s reserve failed\n",
                                        uname);
                } else {
                        WARN(1, "Node %s specified bad/nonexistent size\n",
                                uname);
                }
        }

mem_remove:

        if (memory_remove_prop) {
                if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
                        WARN(1, "Memory remove malformed\n");
                        goto out;
                }

                memory_start = be32_to_cpu(memory_remove_prop[0]);
                memory_size = be32_to_cpu(memory_remove_prop[1]);

                ret = memblock_remove(memory_start, memory_size);
                if (ret)
                        WARN(1, "Failed to remove memory %x-%x\n",
                                memory_start, memory_start+memory_size);
                else
                        pr_info("Node %s removed memory %x-%x\n", uname,
                                memory_start, memory_start+memory_size);
        }

out:
        return 0;
}
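
/*
 * Illustrative device tree node consumed by the scanner above; the
 * property names are the ones parsed here, the node name and values are
 * made up:
 *
 *	example-mem@0 {
 *		compatible = "qcom,msm-contig-mem";
 *		qcom,memory-reservation-type = "EBI1";
 *		qcom,memory-reservation-size = <0x400000>;
 *	};
 */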

/* This function scans the device tree to populate the memory hole table */
int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
        int depth, void *data)
{
        unsigned int *memory_remove_prop;
        unsigned long memory_remove_prop_length;
        unsigned long hole_start;
        unsigned long hole_size;

        memory_remove_prop = of_get_flat_dt_prop(node,
                "qcom,memblock-remove",
                &memory_remove_prop_length);

        /* Nothing to do unless the node carries the property and a
         * compatible string registered via EXPORT_COMPAT().
         */
        if (!memory_remove_prop || !check_for_compat(node))
                goto out;

        if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
                WARN(1, "Memory remove malformed\n");
                goto out;
        }

        hole_start = be32_to_cpu(memory_remove_prop[0]);
        hole_size = be32_to_cpu(memory_remove_prop[1]);

        if (hole_start + hole_size <= MAX_HOLE_ADDRESS) {
                if (memory_hole_start == 0 && memory_hole_end == 0) {
                        memory_hole_start = hole_start;
                        memory_hole_end = hole_start + hole_size;
                } else if ((memory_hole_end - memory_hole_start)
                        <= hole_size) {
                        memory_hole_start = hole_start;
                        memory_hole_end = hole_start + hole_size;
                }
        }
        adjust_meminfo(hole_start, hole_size);

out:
        return 0;
}
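
/*
 * Illustrative node for the hole scanner (the values are made up; the
 * property and the compatible registered at the end of this file are
 * real):
 *
 *	example-hole@0 {
 *		compatible = "qcom,msm-contig-mem";
 *		qcom,memblock-remove = <0x90000000 0x2000000>;
 *	};
 */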

/*
 * Split the memory bank to reflect the hole, if present,
 * using the start and end of the memory hole.
 */
void adjust_meminfo(unsigned long start, unsigned long size)
{
        int i, j;

        for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];

                if (((start + size) <= (bank->start + bank->size)) &&
                        (start >= bank->start)) {
                        memmove(bank + 1, bank,
                                (meminfo.nr_banks - i) * sizeof(*bank));
                        meminfo.nr_banks++;
                        i++;
                        /* The second half begins at the end of the hole
                         * and keeps whatever remained of the original
                         * bank beyond it.
                         */
                        bank[1].size -= (start - bank->start + size);
                        bank[1].start = (start + size);
                        bank[1].highmem = 0;
                        j++;
                        bank->size = start - bank->start;
                }
                j++;
        }
}
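
/*
 * Worked example (illustrative): a bank at 0x80000000 of size
 * 0x10000000 with a hole at start = 0x88000000, size = 0x01000000
 * splits into [0x80000000, 0x88000000) and [0x89000000, 0x90000000),
 * i.e. the first part keeps 0x08000000 bytes and the second
 * 0x10000000 - 0x08000000 - 0x01000000 = 0x07000000 bytes.
 */
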
unsigned long get_ddr_size(void)
{
        unsigned int i;
        unsigned long ret = 0;

        for (i = 0; i < meminfo.nr_banks; i++)
                ret += meminfo.bank[i].size;

        return ret;
}

/* Provide a string that anonymous device tree allocations (those not
 * directly associated with any driver) can use for their "compatible"
 * field.
 */
EXPORT_COMPAT("qcom,msm-contig-mem");
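
/*
 * EXPORT_COMPAT() is assumed to emit its string into the linker section
 * delimited by __compat_exports_start/__compat_exports_end, i.e. the
 * table that check_for_compat() walks when vetting device tree nodes.
 */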