blob: edfb45b36b6a93f6327dc53ec61160bbc8f024c7 [file] [log] [blame]
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07001/* arch/arm/mach-msm/memory.c
2 *
3 * Copyright (C) 2007 Google, Inc.
Larry Bassel38e22da2013-02-25 10:54:16 -08004 * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -07005 *
6 * This software is licensed under the terms of the GNU General Public
7 * License version 2, as published by the Free Software Foundation, and
8 * may be copied, distributed, and modified under those terms.
9 *
10 * This program is distributed in the hope that it will be useful,
11 * but WITHOUT ANY WARRANTY; without even the implied warranty of
12 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
13 * GNU General Public License for more details.
14 *
15 */
16
17#include <linux/mm.h>
18#include <linux/mm_types.h>
19#include <linux/bootmem.h>
20#include <linux/module.h>
21#include <linux/memory_alloc.h>
22#include <linux/memblock.h>
23#include <asm/pgtable.h>
24#include <asm/io.h>
25#include <asm/mach/map.h>
26#include <asm/cacheflush.h>
27#include <asm/setup.h>
28#include <asm/mach-types.h>
29#include <mach/msm_memtypes.h>
Neeti Desai1b2cb552012-11-01 21:57:36 -070030#include <mach/memory.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070031#include <linux/hardirq.h>
32#if defined(CONFIG_MSM_NPA_REMOTE)
33#include "npa_remote.h"
34#include <linux/completion.h>
35#include <linux/err.h>
36#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070037#include <mach/msm_iomap.h>
38#include <mach/socinfo.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070039#include <linux/sched.h>
Laura Abbottd8d0f772012-07-10 10:27:06 -070040#include <linux/of_fdt.h>
Steve Mucklef132c6c2012-06-06 18:30:57 -070041
42/* fixme */
43#include <asm/tlbflush.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070044#include <../../mm/mm.h>
Laura Abbottf637aff2011-12-14 14:16:17 -080045#include <linux/fmem.h>
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070046
#if defined(CONFIG_ARCH_MSM7X27)
static void *strongly_ordered_page;
static char strongly_ordered_mem[PAGE_SIZE*2-4];

/*
 * map_page_strongly_ordered() - map one page of strongly-ordered memory.
 *
 * Carves a page-aligned page out of the static strongly_ordered_mem
 * buffer and maps it at the fixed virtual address
 * MSM_STRONGLY_ORDERED_PAGE with MT_MEMORY_SO attributes, so that
 * write_to_strongly_ordered_memory() has a target to store to.
 * Idempotent: returns immediately once the page has been set up.
 */
void __init map_page_strongly_ordered(void)
{
	long unsigned int phys;
	struct map_desc map[1];

	if (strongly_ordered_page)
		return;

	/*
	 * Cast the buffer address through unsigned long rather than int:
	 * pointers do not fit in int in general, and kernel convention
	 * for address arithmetic is unsigned long.
	 */
	strongly_ordered_page = (void *)PFN_ALIGN(
				(unsigned long)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map[0].pfn = __phys_to_pfn(phys);
	map[0].virtual = MSM_STRONGLY_ORDERED_PAGE;
	map[0].length = PAGE_SIZE;
	map[0].type = MT_MEMORY_SO;
	iotable_init(map, ARRAY_SIZE(map));

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
}
#else
/* Non-7x27 targets need no strongly-ordered page; keep a no-op stub. */
void map_page_strongly_ordered(void) { }
#endif
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -070073
#if defined(CONFIG_ARCH_MSM7X27)
/*
 * write_to_strongly_ordered_memory() - store to the strongly-ordered
 * page mapped by map_page_strongly_ordered().
 *
 * NOTE(review): callers presumably rely on this store as a bus-drain /
 * ordering aid on 7x27 -- confirm against the platform documentation.
 */
void write_to_strongly_ordered_memory(void)
{
	int *so_page = (int *)MSM_STRONGLY_ORDERED_PAGE;

	*so_page = 0;
}
#else
/* No strongly-ordered page on other targets; stub it out. */
void write_to_strongly_ordered_memory(void) { }
#endif
EXPORT_SYMBOL(write_to_strongly_ordered_memory);
83
/* These cache related routines make the assumption (if outer cache is
 * available) that the associated physical memory is contiguous.
 * They will operate on all (L1 and L2 if present) caches.
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *va_begin = (void *)vstart;
	void *va_end = (void *)(vstart + length);

	/* Clean+invalidate L1 by VA, then the outer (L2) cache by PA. */
	dmac_flush_range(va_begin, va_end);
	outer_flush_range(pstart, pstart + length);
}
94
/* Clean (write back, no invalidate) L1 by VA and outer cache by PA. */
void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *va_begin = (void *)vstart;
	void *va_end = (void *)(vstart + length);

	dmac_clean_range(va_begin, va_end);
	outer_clean_range(pstart, pstart + length);
}
101
/* Invalidate (discard, no write back) L1 by VA and outer cache by PA. */
void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	void *va_begin = (void *)vstart;
	void *va_end = (void *)(vstart + length);

	dmac_inv_range(va_begin, va_end);
	outer_inv_range(pstart, pstart + length);
}
108
/*
 * Human-readable names for the MEMTYPE_* indices, used by the warning
 * messages in the reserve-sizing code below.
 * NOTE(review): near-duplicate of the designated-initializer table
 * memtype_names[] later in this file -- candidates for consolidation.
 */
char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

/*
 * Platform reserve description consumed by msm_reserve().
 * NOTE(review): presumably assigned by board/platform setup code before
 * msm_reserve() runs -- the assignment is not visible in this file.
 */
struct reserve_info *reserve_info;
Mitchel Humpherys29a62dd2012-10-03 16:43:28 -0700118/**
119 * calculate_reserve_limits() - calculate reserve limits for all
120 * memtypes
121 *
122 * for each memtype in the reserve_info->memtype_reserve_table, sets
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700123 * the `limit' field to the largest size of any memblock of that
Mitchel Humpherys29a62dd2012-10-03 16:43:28 -0700124 * memtype.
125 */
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700126static void __init calculate_reserve_limits(void)
127{
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700128 struct memblock_region *mr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700129 int memtype;
130 struct memtype_reserve *mt;
131
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700132 for_each_memblock(memory, mr) {
133 memtype = reserve_info->paddr_to_memtype(mr->base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700134 if (memtype == MEMTYPE_NONE) {
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700135 pr_warning("unknown memory type for region at %lx\n",
136 (long unsigned int)mr->base);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700137 continue;
138 }
139 mt = &reserve_info->memtype_reserve_table[memtype];
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700140 mt->limit = max_t(unsigned long, mt->limit, mr->size);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700141 }
142}
143
144static void __init adjust_reserve_sizes(void)
145{
146 int i;
147 struct memtype_reserve *mt;
148
149 mt = &reserve_info->memtype_reserve_table[0];
150 for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
151 if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
152 mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
153 if (mt->size > mt->limit) {
154 pr_warning("%lx size for %s too large, setting to %lx\n",
155 mt->size, memtype_name[i], mt->limit);
156 mt->size = mt->limit;
157 }
158 }
159}
160
161static void __init reserve_memory_for_mempools(void)
162{
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700163 int memtype, memreg_type;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700164 struct memtype_reserve *mt;
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700165 struct memblock_region *mr, *mr_candidate = NULL;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700166 int ret;
167
168 mt = &reserve_info->memtype_reserve_table[0];
169 for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
170 if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
171 continue;
172
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700173 /* Choose the memory block with the highest physical
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700174 * address which is large enough, so that we will not
175 * take memory from the lowest memory bank which the kernel
176 * is in (and cause boot problems) and so that we might
177 * be able to steal memory that would otherwise become
Mitchel Humpherys29a62dd2012-10-03 16:43:28 -0700178 * highmem.
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700179 */
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700180 for_each_memblock(memory, mr) {
181 memreg_type =
182 reserve_info->paddr_to_memtype(mr->base);
183 if (memtype != memreg_type)
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700184 continue;
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700185 if (mr->size >= mt->size
186 && (mr_candidate == NULL
187 || mr->base > mr_candidate->base))
188 mr_candidate = mr;
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700189 }
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700190 BUG_ON(mr_candidate == NULL);
191 /* bump mt up against the top of the region */
192 mt->start = mr_candidate->base + mr_candidate->size - mt->size;
Laura Abbott2257b9c2013-03-20 15:04:10 -0700193 ret = memblock_reserve(mt->start, mt->size);
194 BUG_ON(ret);
195 ret = memblock_free(mt->start, mt->size);
196 BUG_ON(ret);
Mitchel Humpherysa7f2ced2012-10-03 17:01:40 -0700197 ret = memblock_remove(mt->start, mt->size);
198 BUG_ON(ret);
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700199 }
200}
201
202static void __init initialize_mempools(void)
203{
204 struct mem_pool *mpool;
205 int memtype;
206 struct memtype_reserve *mt;
207
208 mt = &reserve_info->memtype_reserve_table[0];
209 for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
210 if (!mt->size)
211 continue;
212 mpool = initialize_memory_pool(mt->start, mt->size, memtype);
213 if (!mpool)
214 pr_warning("failed to create %s mempool\n",
215 memtype_name[memtype]);
216 }
217}
218
Larry Bassel4d4f4482012-04-04 11:26:09 -0700219#define MAX_FIXED_AREA_SIZE 0x11000000
220
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700221void __init msm_reserve(void)
222{
Larry Bassel2d8b42d2012-03-12 10:41:26 -0700223 unsigned long msm_fixed_area_size;
224 unsigned long msm_fixed_area_start;
225
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700226 memory_pool_init();
Utsab Bose4bb94652012-09-28 15:07:35 +0530227 if (reserve_info->calculate_reserve_sizes)
228 reserve_info->calculate_reserve_sizes();
Larry Bassel2d8b42d2012-03-12 10:41:26 -0700229
230 msm_fixed_area_size = reserve_info->fixed_area_size;
231 msm_fixed_area_start = reserve_info->fixed_area_start;
232 if (msm_fixed_area_size)
Larry Bassel4d4f4482012-04-04 11:26:09 -0700233 if (msm_fixed_area_start > reserve_info->low_unstable_address
234 - MAX_FIXED_AREA_SIZE)
235 reserve_info->low_unstable_address =
236 msm_fixed_area_start;
Larry Bassel2d8b42d2012-03-12 10:41:26 -0700237
Bryan Huntsman3f2bc4d2011-08-16 17:27:22 -0700238 calculate_reserve_limits();
239 adjust_reserve_sizes();
240 reserve_memory_for_mempools();
241 initialize_mempools();
242}
243
244static int get_ebi_memtype(void)
245{
246 /* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
247 if (cpu_is_msm7x30() || cpu_is_msm8x55())
248 return MEMTYPE_EBI0;
249 return MEMTYPE_EBI1;
250}
251
/*
 * allocate_contiguous_ebi() - allocate mapped contiguous memory from
 * the SoC-appropriate EBI pool.  Returns the kernel virtual address,
 * or NULL per allocate_contiguous_memory()'s failure convention.
 */
void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	int memtype = get_ebi_memtype();

	return allocate_contiguous_memory(size, memtype, align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);
259
/*
 * allocate_contiguous_ebi_nomap() - allocate contiguous EBI memory
 * without creating a kernel mapping; returns the physical address.
 * The caller's return address is recorded for allocation tracking.
 */
unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	void *caller = __builtin_return_address(0);

	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
			align, caller);
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);
267
/* Snapshot of the CP15 TTBR0 (translation table base register 0),
 * captured by store_ttbr0() for post-mortem debugging. */
unsigned int msm_ttbr0;

void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}
Laura Abbottf637aff2011-12-14 14:16:17 -0800276
/*
 * Callbacks toggling the fmem region between its C and T states.
 * The unused void * argument lets these match a generic callback
 * signature.  Both return the result of fmem_set_state()
 * (NOTE(review): presumably 0 on success / negative errno -- the
 * semantics live in fmem, not visible in this file).
 */
int request_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_C_STATE);
}

int release_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_T_STATE);
}
Laura Abbottd8d0f772012-07-10 10:27:06 -0700286
/*
 * Memtype index -> name mapping used by
 * msm_get_memory_type_from_name().  Indexed by the MEMTYPE_* enum;
 * any slot not named in the designated initializer is NULL and is
 * skipped during lookup.
 */
static char * const memtype_names[] = {
	[MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
	[MEMTYPE_SMI] = "SMI",
	[MEMTYPE_EBI0] = "EBI0",
	[MEMTYPE_EBI1] = "EBI1",
};
293
Olav Haugan92862912012-08-01 11:32:48 -0700294int msm_get_memory_type_from_name(const char *memtype_name)
Laura Abbottd8d0f772012-07-10 10:27:06 -0700295{
296 int i;
297
298 for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
Olav Haugan92862912012-08-01 11:32:48 -0700299 if (memtype_names[i] &&
300 strcmp(memtype_name, memtype_names[i]) == 0)
301 return i;
Laura Abbottd8d0f772012-07-10 10:27:06 -0700302 }
303
Olav Haugan92862912012-08-01 11:32:48 -0700304 pr_err("Could not find memory type %s\n", memtype_name);
Laura Abbottd8d0f772012-07-10 10:27:06 -0700305 return -EINVAL;
306}
307
Olav Haugan92862912012-08-01 11:32:48 -0700308static int reserve_memory_type(const char *mem_name,
309 struct memtype_reserve *reserve_table,
310 int size)
311{
312 int ret = msm_get_memory_type_from_name(mem_name);
313
314 if (ret >= 0) {
315 reserve_table[ret].size += size;
316 ret = 0;
317 }
318 return ret;
319}
320
Neeti Desai1b2cb552012-11-01 21:57:36 -0700321static int __init check_for_compat(unsigned long node)
Laura Abbottd8d0f772012-07-10 10:27:06 -0700322{
323 char **start = __compat_exports_start;
324
325 for ( ; start < __compat_exports_end; start++)
326 if (of_flat_dt_is_compatible(node, *start))
327 return 1;
328
329 return 0;
330}
331
/*
 * dt_scan_for_memory_reserve() - flat-device-tree scan callback that
 * applies memory reservations and removals requested by DT nodes.
 *
 * For nodes whose compatible string was exported via EXPORT_COMPAT:
 *  - "qcom,memory-reservation-type" plus a 4-byte
 *    "qcom,memory-reservation-size" add the given size to the matching
 *    entry of the memtype reserve table passed in @data;
 *  - "qcom,memblock-remove" (two 4-byte cells: base, size) removes the
 *    range from memblock so the kernel never maps it.
 *
 * Always returns 0 so of_scan_flat_dt() keeps walking the whole tree.
 */
int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
		int depth, void *data)
{
	char *memory_name_prop;
	unsigned int *memory_remove_prop;
	unsigned long memory_name_prop_length;
	unsigned long memory_remove_prop_length;
	unsigned long memory_size_prop_length;
	unsigned int *memory_size_prop;
	unsigned int memory_size;
	unsigned int memory_start;
	int ret;

	memory_name_prop = of_get_flat_dt_prop(node,
				"qcom,memory-reservation-type",
				&memory_name_prop_length);
	memory_remove_prop = of_get_flat_dt_prop(node,
				"qcom,memblock-remove",
				&memory_remove_prop_length);

	/* Only act on nodes carrying a compatible string this platform
	 * explicitly exported; everything else is ignored. */
	if (memory_name_prop || memory_remove_prop) {
		if (!check_for_compat(node))
			goto out;
	} else {
		goto out;
	}

	if (memory_name_prop) {
		/* A present-but-empty name is malformed; still fall
		 * through to process a removal on the same node. */
		if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
			WARN(1, "Memory name was malformed\n");
			goto mem_remove;
		}

		memory_size_prop = of_get_flat_dt_prop(node,
					"qcom,memory-reservation-size",
					&memory_size_prop_length);

		if (memory_size_prop &&
			(memory_size_prop_length == sizeof(unsigned int))) {
			/* DT cells are big-endian. */
			memory_size = be32_to_cpu(*memory_size_prop);

			if (reserve_memory_type(memory_name_prop,
					data, memory_size) == 0)
				pr_info("%s reserved %s size %x\n",
					uname, memory_name_prop, memory_size);
			else
				WARN(1, "Node %s reserve failed\n",
						uname);
		} else {
			WARN(1, "Node %s specified bad/nonexistent size\n",
					uname);
		}
	}

mem_remove:

	if (memory_remove_prop) {
		if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
			WARN(1, "Memory remove malformed\n");
			goto out;
		}

		memory_start = be32_to_cpu(memory_remove_prop[0]);
		memory_size = be32_to_cpu(memory_remove_prop[1]);

		ret = memblock_remove(memory_start, memory_size);
		if (ret)
			WARN(1, "Failed to remove memory %x-%x\n",
				memory_start, memory_start+memory_size);
		else
			pr_info("Node %s removed memory %x-%x\n", uname,
				memory_start, memory_start+memory_size);
	}

out:
	return 0;
}
Chintan Pandyad71c5f92012-08-23 17:14:32 +0530409
/* This function scans the device tree to populate the memory hole table */
int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
		int depth, void *data)
{
	unsigned int *memory_remove_prop;
	unsigned long memory_remove_prop_length;
	unsigned long hole_start;
	unsigned long hole_size;

	memory_remove_prop = of_get_flat_dt_prop(node,
				"qcom,memblock-remove",
				&memory_remove_prop_length);

	/* Only nodes with an exported compatible string participate. */
	if (memory_remove_prop) {
		if (!check_for_compat(node))
			goto out;
	} else {
		goto out;
	}

	/* NOTE(review): this second NULL check is redundant -- the
	 * property was already verified non-NULL above. */
	if (memory_remove_prop) {
		/* Expect exactly two big-endian cells: <base size>. */
		if (memory_remove_prop_length != (2*sizeof(unsigned int))) {
			WARN(1, "Memory remove malformed\n");
			goto out;
		}

		hole_start = be32_to_cpu(memory_remove_prop[0]);
		hole_size = be32_to_cpu(memory_remove_prop[1]);

		/* Track the largest hole seen so far (only holes that
		 * end at or below MAX_HOLE_ADDRESS are recorded). */
		if (hole_start + hole_size <= MAX_HOLE_ADDRESS) {
			if (memory_hole_start == 0 && memory_hole_end == 0) {
				memory_hole_start = hole_start;
				memory_hole_end = hole_start + hole_size;
			} else if ((memory_hole_end - memory_hole_start)
						<= hole_size) {
				memory_hole_start = hole_start;
				memory_hole_end = hole_start + hole_size;
			}
		}
		/* NOTE(review): meminfo is adjusted even for holes above
		 * MAX_HOLE_ADDRESS that were not recorded -- confirm
		 * this asymmetry is intended. */
		adjust_meminfo(hole_start, hole_size);
	}

out:
	return 0;
}
455
/*
 * Split the memory bank to reflect the hole, if present,
 * using the start and end of the memory hole.
 *
 * If [start, start+size) lies wholly inside a bank, that bank is
 * split in two: the original entry is truncated to end at the hole,
 * and a new entry is inserted after it covering the memory above the
 * hole.  NOTE(review): no bounds check against the meminfo.bank array
 * capacity before inserting -- confirm meminfo can never be full here.
 */
void adjust_meminfo(unsigned long start, unsigned long size)
{
	int i;

	for (i = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[i];

		if (((start + size) <= (bank->start + bank->size)) &&
			(start >= bank->start)) {
			/* Shift this bank and all later ones up one slot
			 * to open a gap for the post-hole half. */
			memmove(bank + 1, bank,
				(meminfo.nr_banks - i) * sizeof(*bank));
			meminfo.nr_banks++;
			/* Skip the newly inserted copy on the next pass. */
			i++;
			/* Lower half: from original start up to the hole. */
			bank->size = start - bank->start;
			/* Upper half: from the end of the hole onward. */
			bank[1].start = (start + size);
			bank[1].size -= (bank->size + size);
			bank[1].highmem = 0;
		}
	}
}
Larry Bassel38e22da2013-02-25 10:54:16 -0800481
Chintan Pandyad71c5f92012-08-23 17:14:32 +0530482unsigned long get_ddr_size(void)
483{
484 unsigned int i;
485 unsigned long ret = 0;
486
487 for (i = 0; i < meminfo.nr_banks; i++)
488 ret += meminfo.bank[i].size;
489
490 return ret;
491}
Mitchel Humpherys6ae3ae42012-10-30 15:12:52 -0700492
/* Provide a string that anonymous device tree allocations (those not
 * directly associated with any driver) can use for their "compatible"
 * field.  This feeds the __compat_exports table consulted by
 * check_for_compat() above. */
EXPORT_COMPAT("qcom,msm-contig-mem");