/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <linux/sched.h>
#include <linux/of_fdt.h>

/* fixme */
#include <asm/tlbflush.h>
#include <../../mm/mm.h>
#include <linux/fmem.h>

void *strongly_ordered_page;
char strongly_ordered_mem[PAGE_SIZE*2-4];
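/* Sizing sketch (inferred, assuming the array above is at least 4-byte
 * aligned): map_page_strongly_ordered() below rounds the array address up
 * to the next page boundary and maps one full page from there. In the
 * worst case the array starts 4 bytes past a page boundary, so rounding
 * up skips PAGE_SIZE-4 bytes; PAGE_SIZE*2-4 bytes still leave one whole
 * page-aligned PAGE_SIZE region inside the array.
 */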

void map_page_strongly_ordered(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	unsigned long phys;
	struct map_desc map;

	if (strongly_ordered_page)
		return;

	strongly_ordered_page = (void *)PFN_ALIGN((int)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map.pfn = __phys_to_pfn(phys);
	map.virtual = MSM_STRONGLY_ORDERED_PAGE;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE_STRONGLY_ORDERED;
	create_mapping(&map);

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
#endif
}
EXPORT_SYMBOL(map_page_strongly_ordered);

void write_to_strongly_ordered_memory(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	if (!strongly_ordered_page) {
		if (!in_interrupt())
			map_page_strongly_ordered();
		else {
			printk(KERN_ALERT "Cannot map strongly ordered page in interrupt context\n");
			/* capture it here before the allocation fails later */
			BUG();
		}
	}
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
#endif
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);

/* These cache-related routines assume (if an outer cache is present)
 * that the associated physical memory is contiguous.
 * They operate on all caches (L1, and L2 if present).
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_flush_range((void *)vstart, (void *)(vstart + length));
	outer_flush_range(pstart, pstart + length);
}

void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_clean_range((void *)vstart, (void *)(vstart + length));
	outer_clean_range(pstart, pstart + length);
}

void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_inv_range((void *)vstart, (void *)(vstart + length));
	outer_inv_range(pstart, pstart + length);
}
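
/* Usage sketch (not from this file): clean_caches() writes dirty lines
 * back before a device reads a buffer (CPU -> device DMA), and
 * invalidate_caches() discards stale lines after a device has written a
 * buffer (device -> CPU DMA); clean_and_invalidate_caches() does both
 * for bidirectional buffers. Note that the virtual range is passed to
 * the inner (dmac_*) ops and the physical range to the outer (L2) ops.
 */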

void * __init alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
	void *unused_addr = NULL;
	unsigned long addr, tmp_size, unused_size;

	/* Allocate the maximum size needed and see where it ends up.
	 * Then free it -- in this path there are no other allocators,
	 * so we can depend on getting the same address back
	 * when we allocate a smaller piece that is aligned
	 * at the end (if necessary) and the piece we really want,
	 * then free the unused first piece.
	 */

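	/* Worked example (hypothetical numbers): with size = 0x3000,
	 * alignment = 0x4000, and 4K pages, tmp_size = 0x6000. If the probe
	 * allocation lands at 0x1000, unused_size = 0x3000, so the filler
	 * allocation occupies 0x1000-0x3fff and the real allocation starts
	 * at 0x4000 -- aligned as requested. The filler is then freed.
	 */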
	tmp_size = size + alignment - PAGE_SIZE;
	addr = (unsigned long)alloc_bootmem(tmp_size);
	free_bootmem(__pa(addr), tmp_size);

	unused_size = alignment - (addr % alignment);
	if (unused_size)
		unused_addr = alloc_bootmem(unused_size);

	addr = (unsigned long)alloc_bootmem(size);
	if (unused_size)
		free_bootmem(__pa(unused_addr), unused_size);

	return (void *)addr;
}

/* Optional platform hook for changing the power state of a physical
 * memory range; when it is not provided, the helpers below are no-ops.
 */
int (*change_memory_power)(u64, u64, int);

int platform_physical_remove_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_DEEP_POWERDOWN);
}

int platform_physical_active_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_ACTIVE);
}

int platform_physical_low_power_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_SELF_REFRESH);
}

char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

struct reserve_info *reserve_info;

static unsigned long stable_size(struct membank *mb,
	unsigned long unstable_limit)
{
	unsigned long upper_limit = mb->start + mb->size;

	if (!unstable_limit)
		return mb->size;

	/* Check for 32 bit roll-over */
	if (upper_limit >= mb->start) {
		/* If we didn't roll over we can safely make the check below */
		if (upper_limit <= unstable_limit)
			return mb->size;
	}

	if (mb->start >= unstable_limit)
		return 0;
	return unstable_limit - mb->start;
}

/* stable size of all memory banks contiguous to and below this one */
static unsigned long total_stable_size(unsigned long bank)
{
	int i;
	struct membank *mb = &meminfo.bank[bank];
	int memtype = reserve_info->paddr_to_memtype(mb->start);
	unsigned long size;

	size = stable_size(mb, reserve_info->low_unstable_address);
	for (i = bank - 1, mb = &meminfo.bank[bank - 1]; i >= 0; i--, mb--) {
		if (mb->start + mb->size != (mb + 1)->start)
			break;
		if (reserve_info->paddr_to_memtype(mb->start) != memtype)
			break;
		size += stable_size(mb, reserve_info->low_unstable_address);
	}
	return size;
}
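
/* Illustrative example (hypothetical layout): with two adjacent EBI1
 * banks at 0x20000000 (256 MB) and 0x30000000 (256 MB), and no unstable
 * region below them, total_stable_size(1) walks down from bank 1 and
 * returns 512 MB; a hole between the banks or a change of memory type
 * stops the walk.
 */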

static void __init calculate_reserve_limits(void)
{
	int i;
	struct membank *mb;
	int memtype;
	struct memtype_reserve *mt;
	unsigned long size;

	for (i = 0, mb = &meminfo.bank[0]; i < meminfo.nr_banks; i++, mb++) {
		memtype = reserve_info->paddr_to_memtype(mb->start);
		if (memtype == MEMTYPE_NONE) {
			pr_warning("unknown memory type for bank at %lx\n",
				(unsigned long)mb->start);
			continue;
		}
		mt = &reserve_info->memtype_reserve_table[memtype];
		size = total_stable_size(i);
		mt->limit = max(mt->limit, size);
	}
}

static void __init adjust_reserve_sizes(void)
{
	int i;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
			mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
		if (mt->size > mt->limit) {
			pr_warning("%lx size for %s too large, setting to %lx\n",
				mt->size, memtype_name[i], mt->limit);
			mt->size = mt->limit;
		}
	}
}

static void __init reserve_memory_for_mempools(void)
{
	int i, memtype, membank_type;
	struct memtype_reserve *mt;
	struct membank *mb;
	int ret;
	unsigned long size;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
			continue;

		/* We know we will find memory bank(s) of the proper size,
		 * as we have limited the size of the memory pool for
		 * each memory type to the largest total size of the memory
		 * banks which are contiguous and of the correct memory type.
		 * Choose the memory bank with the highest physical
		 * address which is large enough, so that we will not
		 * take memory from the lowest memory bank which the kernel
		 * is in (and cause boot problems) and so that we might
		 * be able to steal memory that would otherwise become
		 * highmem. However, do not use unstable memory.
		 */
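		/* Illustrative example: if this pool needs 8 MB of EBI1
		 * and banks 2 and 3 are contiguous EBI1 memory, the scan
		 * below starts at the highest bank and carves the 8 MB
		 * from the top of the stable portion of bank 3.
		 */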
		for (i = meminfo.nr_banks - 1; i >= 0; i--) {
			mb = &meminfo.bank[i];
			membank_type =
				reserve_info->paddr_to_memtype(mb->start);
			if (memtype != membank_type)
				continue;
			size = total_stable_size(i);
			if (size >= mt->size) {
				size = stable_size(mb,
					reserve_info->low_unstable_address);
				if (!size)
					continue;
				/* mt->size may be larger than size; all this
				 * means is that we are carving the memory pool
				 * out of multiple contiguous memory banks.
				 */
				mt->start = mb->start + (size - mt->size);
				ret = memblock_remove(mt->start, mt->size);
				BUG_ON(ret);
				break;
			}
		}
	}
}

static void __init initialize_mempools(void)
{
	struct mem_pool *mpool;
	int memtype;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (!mt->size)
			continue;
		mpool = initialize_memory_pool(mt->start, mt->size, memtype);
		if (!mpool)
			pr_warning("failed to create %s mempool\n",
				memtype_name[memtype]);
	}
}

#define MAX_FIXED_AREA_SIZE 0x11000000

void __init msm_reserve(void)
{
	unsigned long msm_fixed_area_size;
	unsigned long msm_fixed_area_start;

	memory_pool_init();
	reserve_info->calculate_reserve_sizes();

	msm_fixed_area_size = reserve_info->fixed_area_size;
	msm_fixed_area_start = reserve_info->fixed_area_start;
	if (msm_fixed_area_size &&
	    msm_fixed_area_start > reserve_info->low_unstable_address
				   - MAX_FIXED_AREA_SIZE)
		reserve_info->low_unstable_address = msm_fixed_area_start;

	calculate_reserve_limits();
	adjust_reserve_sizes();
	reserve_memory_for_mempools();
	initialize_mempools();
}

static int get_ebi_memtype(void)
{
	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		return MEMTYPE_EBI0;
	return MEMTYPE_EBI1;
}

void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	return allocate_contiguous_memory(size, get_ebi_memtype(),
		align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
		align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);
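
/* Illustrative call (hypothetical sizes): allocate_contiguous_ebi(SZ_1M,
 * SZ_4K, 0) asks the mempool for 1 MB of physically contiguous EBI
 * memory and returns a kernel mapping of it, while
 * allocate_contiguous_ebi_nomap() hands back only the physical address
 * and creates no kernel mapping.
 */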

unsigned int msm_ttbr0;

void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}

int request_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_C_STATE);
}

int release_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_T_STATE);
}

static char * const memtype_names[] = {
	[MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
	[MEMTYPE_SMI] = "SMI",
	[MEMTYPE_EBI0] = "EBI0",
	[MEMTYPE_EBI1] = "EBI1",
};

int msm_get_memory_type_from_name(const char *memtype_name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
		if (memtype_names[i] &&
		    strcmp(memtype_name, memtype_names[i]) == 0)
			return i;
	}

	pr_err("Could not find memory type %s\n", memtype_name);
	return -EINVAL;
}
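
/* Example: msm_get_memory_type_from_name("EBI1") returns MEMTYPE_EBI1;
 * an unknown name logs an error and returns -EINVAL, which
 * reserve_memory_type() below propagates to its caller.
 */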

static int reserve_memory_type(const char *mem_name,
	struct memtype_reserve *reserve_table,
	int size)
{
	int ret = msm_get_memory_type_from_name(mem_name);

	if (ret >= 0) {
		reserve_table[ret].size += size;
		ret = 0;
	}
	return ret;
}

static int check_for_compat(unsigned long node)
{
	char **start = __compat_exports_start;

	for ( ; start < __compat_exports_end; start++)
		if (of_flat_dt_is_compatible(node, *start))
			return 1;

	return 0;
}

int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
	int depth, void *data)
{
	char *memory_name_prop;
	unsigned int *memory_remove_prop;
	unsigned long memory_name_prop_length;
	unsigned long memory_remove_prop_length;
	unsigned long memory_size_prop_length;
	unsigned int *memory_size_prop;
	unsigned int memory_size;
	unsigned int memory_start;
	int ret;

	memory_name_prop = of_get_flat_dt_prop(node,
		"qcom,memory-reservation-type",
		&memory_name_prop_length);
	memory_remove_prop = of_get_flat_dt_prop(node,
		"qcom,memblock-remove",
		&memory_remove_prop_length);

	if (memory_name_prop || memory_remove_prop) {
		if (!check_for_compat(node))
			goto out;
	} else {
		goto out;
	}

	if (memory_name_prop) {
		if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
			WARN(1, "Memory name was malformed\n");
			goto mem_remove;
		}

		memory_size_prop = of_get_flat_dt_prop(node,
			"qcom,memory-reservation-size",
			&memory_size_prop_length);

		if (memory_size_prop &&
		    (memory_size_prop_length == sizeof(unsigned int))) {
			memory_size = be32_to_cpu(*memory_size_prop);

			if (reserve_memory_type(memory_name_prop,
					data, memory_size) == 0)
				pr_info("%s reserved %s size %x\n",
					uname, memory_name_prop, memory_size);
			else
				WARN(1, "Node %s reserve failed\n",
					uname);
		} else {
			WARN(1, "Node %s specified bad/nonexistent size\n",
				uname);
		}
	}

mem_remove:

	if (memory_remove_prop) {
		if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
			WARN(1, "Memory remove malformed\n");
			goto out;
		}

		memory_start = be32_to_cpu(memory_remove_prop[0]);
		memory_size = be32_to_cpu(memory_remove_prop[1]);

		ret = memblock_remove(memory_start, memory_size);
		if (ret)
			WARN(1, "Failed to remove memory %x-%x\n",
				memory_start, memory_start+memory_size);
		else
			pr_info("Node %s removed memory %x-%x\n", uname,
				memory_start, memory_start+memory_size);
	}

out:
	return 0;
}
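
/* Example device tree node consumed by dt_scan_for_memory_reserve()
 * (illustrative node name and values; the node must also match one of
 * the compatible strings exported via __compat_exports):
 *
 *	qcom,msm-example@0 {
 *		compatible = "qcom,msm-example";
 *		qcom,memory-reservation-type = "EBI1";
 *		qcom,memory-reservation-size = <0x400000>;
 *		qcom,memblock-remove = <0x40000000 0x100000>;
 *	};
 */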

unsigned long get_ddr_size(void)
{
	unsigned int i;
	unsigned long ret = 0;

	for (i = 0; i < meminfo.nr_banks; i++)
		ret += meminfo.bank[i].size;

	return ret;
}