blob: 786dad8dbbf80cd0a7844dacee58e28fe7058c6c [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/memory.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Larry Bassel38e22da2013-02-25 10:54:16 -08004 * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/mm.h>
18#include <linux/mm_types.h>
19#include <linux/bootmem.h>
20#include <linux/module.h>
21#include <linux/memory_alloc.h>
22#include <linux/memblock.h>
23#include <asm/pgtable.h>
24#include <asm/io.h>
25#include <asm/mach/map.h>
26#include <asm/cacheflush.h>
27#include <asm/setup.h>
28#include <asm/mach-types.h>
29#include <mach/msm_memtypes.h>
Neeti Desai1b2cb552012-11-01 21:57:36 -070030#include <mach/memory.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070031#include <linux/hardirq.h>
32#if defined(CONFIG_MSM_NPA_REMOTE)
33#include "npa_remote.h"
34#include <linux/completion.h>
35#include <linux/err.h>
36#endif
37#include <linux/android_pmem.h>
38#include <mach/msm_iomap.h>
39#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070040#include <linux/sched.h>
Laura Abbottd8d0f772012-07-10 10:27:06 -070041#include <linux/of_fdt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070042
43/* fixme */
44#include <asm/tlbflush.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070045#include <../../mm/mm.h>
Laura Abbottf637aff2011-12-14 14:16:17 -080046#include <linux/fmem.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070047
#if defined(CONFIG_ARCH_MSM7X27)
/* Backing store for the strongly ordered page; oversized so a full
 * PAGE_SIZE-aligned page can be carved out of it.
 */
static void *strongly_ordered_page;
static char strongly_ordered_mem[PAGE_SIZE*2-4];

/**
 * map_page_strongly_ordered() - map one page with strongly ordered
 * attributes (MT_MEMORY_SO) at the fixed virtual address
 * MSM_STRONGLY_ORDERED_PAGE.
 *
 * Idempotent: returns immediately if the page is already mapped.
 */
void __init map_page_strongly_ordered(void)
{
	long unsigned int phys;
	struct map_desc map[1];

	if (strongly_ordered_page)
		return;

	/*
	 * Fix: align via unsigned long, not int.  Converting a pointer
	 * to a signed int is unsafe for addresses with the top bit set
	 * (implementation-defined, and sign-related breakage in the
	 * alignment arithmetic); unsigned long is pointer-sized on ARM.
	 */
	strongly_ordered_page =
		(void *)PFN_ALIGN((unsigned long)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map[0].pfn = __phys_to_pfn(phys);
	map[0].virtual = MSM_STRONGLY_ORDERED_PAGE;
	map[0].length = PAGE_SIZE;
	map[0].type = MT_MEMORY_SO;
	iotable_init(map, ARRAY_SIZE(map));

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
}
#else
void map_page_strongly_ordered(void) { }
#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070074
#if defined(CONFIG_ARCH_MSM7X27)
/* Perform a dummy write to the strongly ordered page set up by
 * map_page_strongly_ordered().
 * NOTE(review): presumably used as a write barrier/drain on 7x27 --
 * the code only shows the store itself; confirm against callers.
 * Assumes map_page_strongly_ordered() has already run, otherwise this
 * dereferences an unmapped address.
 */
void write_to_strongly_ordered_memory(void)
{
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
}
#else
/* No-op on targets without the strongly ordered page. */
void write_to_strongly_ordered_memory(void) { }
#endif
EXPORT_SYMBOL(write_to_strongly_ordered_memory);
84
/* These cache related routines make the assumption (if outer cache is
 * available) that the associated physical memory is contiguous.
 * They will operate on all (L1 and L2 if present) caches.
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *virt_begin = (void *)vstart;
	void *virt_end = (void *)(vstart + length);

	/* L1: clean + invalidate by virtual range. */
	dmac_flush_range(virt_begin, virt_end);
	/* Outer (L2) cache operates on physical addresses. */
	outer_flush_range(pstart, pstart + length);
}
95
/* Clean (write back, no invalidate) all caches over the given range;
 * same contiguity assumption as clean_and_invalidate_caches(). */
void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *virt_begin = (void *)vstart;
	void *virt_end = (void *)(vstart + length);

	dmac_clean_range(virt_begin, virt_end);
	outer_clean_range(pstart, pstart + length);
}
102
/* Invalidate (discard, no write back) all caches over the given range;
 * same contiguity assumption as clean_and_invalidate_caches(). */
void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *virt_begin = (void *)vstart;
	void *virt_end = (void *)(vstart + length);

	dmac_inv_range(virt_begin, virt_end);
	outer_inv_range(pstart, pstart + length);
}
109
/* Human-readable names indexed by memtype, used in warning messages.
 * NOTE(review): near-duplicate of the designated-initializer table
 * memtype_names later in this file -- keep the two in sync.
 */
char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};
116
117struct reserve_info *reserve_info;
118
Mitchel Humpherys29a62dd2012-10-03 16:43:28 -0700119/**
120 * calculate_reserve_limits() - calculate reserve limits for all
121 * memtypes
122 *
123 * for each memtype in the reserve_info->memtype_reserve_table, sets
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700124 * the `limit' field to the largest size of any memblock of that
Mitchel Humpherys29a62dd2012-10-03 16:43:28 -0700125 * memtype.
126 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700127static void __init calculate_reserve_limits(void)
128{
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700129 struct memblock_region *mr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700130 int memtype;
131 struct memtype_reserve *mt;
132
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700133 for_each_memblock(memory, mr) {
134 memtype = reserve_info->paddr_to_memtype(mr->base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700135 if (memtype == MEMTYPE_NONE) {
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700136 pr_warning("unknown memory type for region at %lx\n",
137 (long unsigned int)mr->base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700138 continue;
139 }
140 mt = &reserve_info->memtype_reserve_table[memtype];
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700141 mt->limit = max_t(unsigned long, mt->limit, mr->size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700142 }
143}
144
145static void __init adjust_reserve_sizes(void)
146{
147 int i;
148 struct memtype_reserve *mt;
149
150 mt = &reserve_info->memtype_reserve_table[0];
151 for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
152 if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
153 mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
154 if (mt->size > mt->limit) {
155 pr_warning("%lx size for %s too large, setting to %lx\n",
156 mt->size, memtype_name[i], mt->limit);
157 mt->size = mt->limit;
158 }
159 }
160}
161
162static void __init reserve_memory_for_mempools(void)
163{
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700164 int memtype, memreg_type;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700165 struct memtype_reserve *mt;
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700166 struct memblock_region *mr, *mr_candidate = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700167 int ret;
168
169 mt = &reserve_info->memtype_reserve_table[0];
170 for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
171 if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
172 continue;
173
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700174 /* Choose the memory block with the highest physical
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700175 * address which is large enough, so that we will not
176 * take memory from the lowest memory bank which the kernel
177 * is in (and cause boot problems) and so that we might
178 * be able to steal memory that would otherwise become
Mitchel Humpherys29a62dd2012-10-03 16:43:28 -0700179 * highmem.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700180 */
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700181 for_each_memblock(memory, mr) {
182 memreg_type =
183 reserve_info->paddr_to_memtype(mr->base);
184 if (memtype != memreg_type)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700185 continue;
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700186 if (mr->size >= mt->size
187 && (mr_candidate == NULL
188 || mr->base > mr_candidate->base))
189 mr_candidate = mr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700190 }
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700191 BUG_ON(mr_candidate == NULL);
192 /* bump mt up against the top of the region */
193 mt->start = mr_candidate->base + mr_candidate->size - mt->size;
Laura Abbott2257b9c2013-03-20 15:04:10 -0700194 ret = memblock_reserve(mt->start, mt->size);
195 BUG_ON(ret);
196 ret = memblock_free(mt->start, mt->size);
197 BUG_ON(ret);
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700198 ret = memblock_remove(mt->start, mt->size);
199 BUG_ON(ret);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700200 }
201}
202
203static void __init initialize_mempools(void)
204{
205 struct mem_pool *mpool;
206 int memtype;
207 struct memtype_reserve *mt;
208
209 mt = &reserve_info->memtype_reserve_table[0];
210 for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
211 if (!mt->size)
212 continue;
213 mpool = initialize_memory_pool(mt->start, mt->size, memtype);
214 if (!mpool)
215 pr_warning("failed to create %s mempool\n",
216 memtype_name[memtype]);
217 }
218}
219
Larry Bassel4d4f4482012-04-04 11:26:09 -0700220#define MAX_FIXED_AREA_SIZE 0x11000000
221
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700222void __init msm_reserve(void)
223{
Larry Bassel2d8b42d2012-03-12 10:41:26 -0700224 unsigned long msm_fixed_area_size;
225 unsigned long msm_fixed_area_start;
226
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700227 memory_pool_init();
Utsab Bose4bb94652012-09-28 15:07:35 +0530228 if (reserve_info->calculate_reserve_sizes)
229 reserve_info->calculate_reserve_sizes();
Larry Bassel2d8b42d2012-03-12 10:41:26 -0700230
231 msm_fixed_area_size = reserve_info->fixed_area_size;
232 msm_fixed_area_start = reserve_info->fixed_area_start;
233 if (msm_fixed_area_size)
Larry Bassel4d4f4482012-04-04 11:26:09 -0700234 if (msm_fixed_area_start > reserve_info->low_unstable_address
235 - MAX_FIXED_AREA_SIZE)
236 reserve_info->low_unstable_address =
237 msm_fixed_area_start;
Larry Bassel2d8b42d2012-03-12 10:41:26 -0700238
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700239 calculate_reserve_limits();
240 adjust_reserve_sizes();
241 reserve_memory_for_mempools();
242 initialize_mempools();
243}
244
245static int get_ebi_memtype(void)
246{
247 /* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
248 if (cpu_is_msm7x30() || cpu_is_msm8x55())
249 return MEMTYPE_EBI0;
250 return MEMTYPE_EBI1;
251}
252
/* Allocate mapped, contiguous EBI memory; thin wrapper that resolves
 * the correct EBI memtype for this SoC. */
void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	int memtype = get_ebi_memtype();

	return allocate_contiguous_memory(size, memtype, align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);
260
/* Allocate unmapped, contiguous EBI memory; records our caller for the
 * allocator's bookkeeping. */
unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	void *caller = __builtin_return_address(0);

	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
						align, caller);
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);
268
/* Snapshot of the ARM translation table base register 0, captured by
 * store_ttbr0() for post-mortem debugging. */
unsigned int msm_ttbr0;

/* Read CP15 c2 (TTBR0) into msm_ttbr0. */
void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}
Laura Abbottf637aff2011-12-14 14:16:17 -0800277
278int request_fmem_c_region(void *unused)
279{
280 return fmem_set_state(FMEM_C_STATE);
281}
282
283int release_fmem_c_region(void *unused)
284{
285 return fmem_set_state(FMEM_T_STATE);
286}
Laura Abbottd8d0f772012-07-10 10:27:06 -0700287
/* Memtype lookup table keyed by the MEMTYPE_* enum; entries not listed
 * here are NULL and treated as unknown by
 * msm_get_memory_type_from_name().
 * NOTE(review): near-duplicate of the positional memtype_name table
 * earlier in this file -- keep the two in sync. */
static char * const memtype_names[] = {
	[MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
	[MEMTYPE_SMI] = "SMI",
	[MEMTYPE_EBI0] = "EBI0",
	[MEMTYPE_EBI1] = "EBI1",
};
294
Olav Haugan92862912012-08-01 11:32:48 -0700295int msm_get_memory_type_from_name(const char *memtype_name)
Laura Abbottd8d0f772012-07-10 10:27:06 -0700296{
297 int i;
298
299 for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
Olav Haugan92862912012-08-01 11:32:48 -0700300 if (memtype_names[i] &&
301 strcmp(memtype_name, memtype_names[i]) == 0)
302 return i;
Laura Abbottd8d0f772012-07-10 10:27:06 -0700303 }
304
Olav Haugan92862912012-08-01 11:32:48 -0700305 pr_err("Could not find memory type %s\n", memtype_name);
Laura Abbottd8d0f772012-07-10 10:27:06 -0700306 return -EINVAL;
307}
308
Olav Haugan92862912012-08-01 11:32:48 -0700309static int reserve_memory_type(const char *mem_name,
310 struct memtype_reserve *reserve_table,
311 int size)
312{
313 int ret = msm_get_memory_type_from_name(mem_name);
314
315 if (ret >= 0) {
316 reserve_table[ret].size += size;
317 ret = 0;
318 }
319 return ret;
320}
321
Neeti Desai1b2cb552012-11-01 21:57:36 -0700322static int __init check_for_compat(unsigned long node)
Laura Abbottd8d0f772012-07-10 10:27:06 -0700323{
324 char **start = __compat_exports_start;
325
326 for ( ; start < __compat_exports_end; start++)
327 if (of_flat_dt_is_compatible(node, *start))
328 return 1;
329
330 return 0;
331}
332
/* Flattened-device-tree scan callback: for each node that carries a
 * qcom memory-reservation or memblock-remove property AND matches a
 * compatible string exported via EXPORT_COMPAT, records the requested
 * reservation in the memtype_reserve table passed as `data' and/or
 * removes the requested range from memblock.  Always returns 0 so the
 * scan continues over every node.
 */
int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
		int depth, void *data)
{
	char *memory_name_prop;
	unsigned int *memory_remove_prop;
	unsigned long memory_name_prop_length;
	unsigned long memory_remove_prop_length;
	unsigned long memory_size_prop_length;
	unsigned int *memory_size_prop;
	unsigned int memory_size;
	unsigned int memory_start;
	int ret;

	memory_name_prop = of_get_flat_dt_prop(node,
				"qcom,memory-reservation-type",
				&memory_name_prop_length);
	memory_remove_prop = of_get_flat_dt_prop(node,
				"qcom,memblock-remove",
				&memory_remove_prop_length);

	/* Nothing to do unless at least one property is present and the
	 * node is in the exported-compatible whitelist. */
	if (memory_name_prop || memory_remove_prop) {
		if (!check_for_compat(node))
			goto out;
	} else {
		goto out;
	}

	if (memory_name_prop) {
		/* An empty name is malformed; still honor any remove
		 * property on the same node. */
		if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
			WARN(1, "Memory name was malformed\n");
			goto mem_remove;
		}

		memory_size_prop = of_get_flat_dt_prop(node,
				"qcom,memory-reservation-size",
				&memory_size_prop_length);

		/* Size must be exactly one 32-bit big-endian cell. */
		if (memory_size_prop &&
			(memory_size_prop_length == sizeof(unsigned int))) {
			memory_size = be32_to_cpu(*memory_size_prop);

			if (reserve_memory_type(memory_name_prop,
					data, memory_size) == 0)
				pr_info("%s reserved %s size %x\n",
					uname, memory_name_prop, memory_size);
			else
				WARN(1, "Node %s reserve failed\n",
						uname);
		} else {
			WARN(1, "Node %s specified bad/nonexistent size\n",
					uname);
		}
	}

mem_remove:

	if (memory_remove_prop) {
		/* Remove property is a (base, size) pair of 32-bit
		 * big-endian cells. */
		if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
			WARN(1, "Memory remove malformed\n");
			goto out;
		}

		memory_start = be32_to_cpu(memory_remove_prop[0]);
		memory_size = be32_to_cpu(memory_remove_prop[1]);

		ret = memblock_remove(memory_start, memory_size);
		if (ret)
			WARN(1, "Failed to remove memory %x-%x\n",
				memory_start, memory_start+memory_size);
		else
			pr_info("Node %s removed memory %x-%x\n", uname,
				memory_start, memory_start+memory_size);
	}

out:
	return 0;
}
Chintan Pandyad71c5f92012-08-23 17:14:32 +0530410
Neeti Desai1b2cb552012-11-01 21:57:36 -0700411/* This function scans the device tree to populate the memory hole table */
412int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
413 int depth, void *data)
414{
415 unsigned int *memory_remove_prop;
416 unsigned long memory_remove_prop_length;
417 unsigned long hole_start;
418 unsigned long hole_size;
419
420 memory_remove_prop = of_get_flat_dt_prop(node,
421 "qcom,memblock-remove",
422 &memory_remove_prop_length);
423
424 if (memory_remove_prop) {
425 if (!check_for_compat(node))
426 goto out;
427 } else {
428 goto out;
429 }
430
431 if (memory_remove_prop) {
432 if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
433 WARN(1, "Memory remove malformed\n");
434 goto out;
435 }
436
437 hole_start = be32_to_cpu(memory_remove_prop[0]);
438 hole_size = be32_to_cpu(memory_remove_prop[1]);
439
440 if (hole_start + hole_size <= MAX_HOLE_ADDRESS) {
441 if (memory_hole_start == 0 && memory_hole_end == 0) {
442 memory_hole_start = hole_start;
443 memory_hole_end = hole_start + hole_size;
444 } else if ((memory_hole_end - memory_hole_start)
445 <= hole_size) {
446 memory_hole_start = hole_start;
447 memory_hole_end = hole_start + hole_size;
448 }
449 }
450 adjust_meminfo(hole_start, hole_size);
451 }
452
453out:
454 return 0;
455}
456
/*
 * Split the memory bank to reflect the hole, if present,
 * using the start and end of the memory hole.
 */
void adjust_meminfo(unsigned long start, unsigned long size)
{
	int i;

	for (i = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[i];

		/* Only split a bank that fully contains
		 * [start, start + size). */
		if (((start + size) <= (bank->start + bank->size)) &&
			(start >= bank->start)) {
			/* Shift this bank and all later banks up one
			 * slot to make room for the upper half of the
			 * split.
			 * NOTE(review): assumes the meminfo.bank array
			 * has a free slot (nr_banks < NR_BANKS) -- not
			 * checked here; confirm against callers.
			 */
			memmove(bank + 1, bank,
				(meminfo.nr_banks - i) * sizeof(*bank));
			meminfo.nr_banks++;
			/* Skip over the newly inserted upper half. */
			i++;

			/* Lower half ends where the hole starts. */
			bank->size = start - bank->start;
			/* Upper half begins past the hole and loses the
			 * lower half's and the hole's bytes. */
			bank[1].start = (start + size);
			bank[1].size -= (bank->size + size);
			bank[1].highmem = 0;
		}
	}
}
Larry Bassel38e22da2013-02-25 10:54:16 -0800482
Chintan Pandyad71c5f92012-08-23 17:14:32 +0530483unsigned long get_ddr_size(void)
484{
485 unsigned int i;
486 unsigned long ret = 0;
487
488 for (i = 0; i < meminfo.nr_banks; i++)
489 ret += meminfo.bank[i].size;
490
491 return ret;
492}
Mitchel Humpherys6ae3ae42012-10-30 15:12:52 -0700493
/* Provide a string that anonymous device tree allocations (those not
 * directly associated with any driver) can use for their "compatible"
 * field.
 * NOTE(review): presumably lands in the __compat_exports section that
 * check_for_compat() iterates -- confirm in the EXPORT_COMPAT macro
 * definition. */
EXPORT_COMPAT("qcom,msm-contig-mem");