/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2013, The Linux Foundation. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <mach/memory.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <linux/sched.h>
#include <linux/of_fdt.h>

/* fixme */
#include <asm/tlbflush.h>
#include <../../mm/mm.h>
#include <linux/fmem.h>

#if defined(CONFIG_ARCH_MSM7X27)
static void *strongly_ordered_page;
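/*
 * The backing buffer is sized at just under two pages so that
 * PFN_ALIGN() in map_page_strongly_ordered() below is guaranteed to
 * find one whole page-aligned page inside it, wherever the linker
 * happens to place the array.
 */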
static char strongly_ordered_mem[PAGE_SIZE*2-4];

void __init map_page_strongly_ordered(void)
{
	unsigned long phys;
	struct map_desc map[1];

	if (strongly_ordered_page)
		return;

	strongly_ordered_page = (void *)PFN_ALIGN((int)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map[0].pfn = __phys_to_pfn(phys);
	map[0].virtual = MSM_STRONGLY_ORDERED_PAGE;
	map[0].length = PAGE_SIZE;
	map[0].type = MT_MEMORY_SO;
	iotable_init(map, ARRAY_SIZE(map));

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
}
#else
void map_page_strongly_ordered(void) { }
#endif

#if defined(CONFIG_ARCH_MSM7X27)
void write_to_strongly_ordered_memory(void)
{
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
}
#else
void write_to_strongly_ordered_memory(void) { }
#endif
EXPORT_SYMBOL(write_to_strongly_ordered_memory);

/* These cache-related routines assume (if an outer cache is present)
 * that the associated physical memory is contiguous.
 * They operate on all (L1 and L2 if present) caches.
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_flush_range((void *)vstart, (void *)(vstart + length));
	outer_flush_range(pstart, pstart + length);
}

void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_clean_range((void *)vstart, (void *)(vstart + length));
	outer_clean_range(pstart, pstart + length);
}

void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	dmac_inv_range((void *)vstart, (void *)(vstart + length));
	outer_inv_range(pstart, pstart + length);
}
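
/*
 * Illustrative use (not from the original source; `buf' and `len' are
 * placeholders): cleaning a physically contiguous, kernel-mapped buffer
 * out to memory before a non-coherent device reads it:
 *
 *	clean_caches((unsigned long)buf, len, virt_to_phys(buf));
 */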

char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

struct reserve_info *reserve_info;

/**
 * calculate_reserve_limits() - calculate reserve limits for all
 * memtypes
 *
 * For each memtype in the reserve_info->memtype_reserve_table, sets
 * the `limit' field to the largest size of any memblock of that
 * memtype.
 */
static void __init calculate_reserve_limits(void)
{
	struct memblock_region *mr;
	int memtype;
	struct memtype_reserve *mt;

	for_each_memblock(memory, mr) {
		memtype = reserve_info->paddr_to_memtype(mr->base);
		if (memtype == MEMTYPE_NONE) {
			pr_warning("unknown memory type for region at %lx\n",
				(unsigned long)mr->base);
			continue;
		}
		mt = &reserve_info->memtype_reserve_table[memtype];
		mt->limit = max_t(unsigned long, mt->limit, mr->size);
	}
}

static void __init adjust_reserve_sizes(void)
{
	int i;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
			mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
		if (mt->size > mt->limit) {
			pr_warning("%lx size for %s too large, setting to %lx\n",
				mt->size, memtype_name[i], mt->limit);
			mt->size = mt->limit;
		}
	}
}

static void __init reserve_memory_for_mempools(void)
{
	int memtype, memreg_type;
	struct memtype_reserve *mt;
	struct memblock_region *mr, *mr_candidate = NULL;
	int ret;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
			continue;

		/* Choose the memory block with the highest physical
		 * address which is large enough, so that we will not
		 * take memory from the lowest memory bank which the kernel
		 * is in (and cause boot problems) and so that we might
		 * be able to steal memory that would otherwise become
		 * highmem.
		 */
		for_each_memblock(memory, mr) {
			memreg_type =
				reserve_info->paddr_to_memtype(mr->base);
			if (memtype != memreg_type)
				continue;
			if (mr->size >= mt->size
				&& (mr_candidate == NULL
					|| mr->base > mr_candidate->base))
				mr_candidate = mr;
		}
		BUG_ON(mr_candidate == NULL);
		/* bump mt up against the top of the region */
		mt->start = mr_candidate->base + mr_candidate->size - mt->size;
		ret = memblock_remove(mt->start, mt->size);
		BUG_ON(ret);
	}
}

static void __init initialize_mempools(void)
{
	struct mem_pool *mpool;
	int memtype;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (!mt->size)
			continue;
		mpool = initialize_memory_pool(mt->start, mt->size, memtype);
		if (!mpool)
			pr_warning("failed to create %s mempool\n",
				memtype_name[memtype]);
	}
}

#define MAX_FIXED_AREA_SIZE 0x11000000

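/*
 * msm_reserve() drives the reservation sequence at early boot.  Order
 * matters below: limits must be derived from the memblock regions
 * before the requested sizes are clamped to them, and memory must be
 * removed from memblock before the mempools are created on top of it.
 */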
void __init msm_reserve(void)
{
	unsigned long msm_fixed_area_size;
	unsigned long msm_fixed_area_start;

	memory_pool_init();
	if (reserve_info->calculate_reserve_sizes)
		reserve_info->calculate_reserve_sizes();

	msm_fixed_area_size = reserve_info->fixed_area_size;
	msm_fixed_area_start = reserve_info->fixed_area_start;
	if (msm_fixed_area_size)
		if (msm_fixed_area_start > reserve_info->low_unstable_address
			- MAX_FIXED_AREA_SIZE)
			reserve_info->low_unstable_address =
				msm_fixed_area_start;

	calculate_reserve_limits();
	adjust_reserve_sizes();
	reserve_memory_for_mempools();
	initialize_mempools();
}

static int get_ebi_memtype(void)
{
	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		return MEMTYPE_EBI0;
	return MEMTYPE_EBI1;
}

void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	return allocate_contiguous_memory(size, get_ebi_memtype(),
		align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
		align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);

unsigned int msm_ttbr0;

void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}

int request_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_C_STATE);
}

int release_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_T_STATE);
}

static char * const memtype_names[] = {
	[MEMTYPE_SMI_KERNEL] = "SMI_KERNEL",
	[MEMTYPE_SMI] = "SMI",
	[MEMTYPE_EBI0] = "EBI0",
	[MEMTYPE_EBI1] = "EBI1",
};

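/*
 * Map a memtype name string (e.g. "EBI1", as carried in the device
 * tree properties parsed below) back to its MEMTYPE_* index.
 */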
int msm_get_memory_type_from_name(const char *memtype_name)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(memtype_names); i++) {
		if (memtype_names[i] &&
			strcmp(memtype_name, memtype_names[i]) == 0)
			return i;
	}

	pr_err("Could not find memory type %s\n", memtype_name);
	return -EINVAL;
}

static int reserve_memory_type(const char *mem_name,
				struct memtype_reserve *reserve_table,
				int size)
{
	int ret = msm_get_memory_type_from_name(mem_name);

	if (ret >= 0) {
		reserve_table[ret].size += size;
		ret = 0;
	}
	return ret;
}

static int __init check_for_compat(unsigned long node)
{
	char **start = __compat_exports_start;

	for ( ; start < __compat_exports_end; start++)
		if (of_flat_dt_is_compatible(node, *start))
			return 1;

	return 0;
}

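/*
 * Callback whose signature matches the of_scan_flat_dt() iterator,
 * which is presumably how these scans are driven at early boot.
 *
 * Illustrative node (names and values invented; the properties and the
 * compatible string are the ones this file handles):
 *
 *	example-region {
 *		compatible = "qcom,msm-contig-mem";
 *		qcom,memory-reservation-type = "EBI1";
 *		qcom,memory-reservation-size = <0x100000>;
 *		qcom,memblock-remove = <0x40000000 0x1000000>;
 *	};
 */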
int __init dt_scan_for_memory_reserve(unsigned long node, const char *uname,
		int depth, void *data)
{
	char *memory_name_prop;
	unsigned int *memory_remove_prop;
	unsigned long memory_name_prop_length;
	unsigned long memory_remove_prop_length;
	unsigned long memory_size_prop_length;
	unsigned int *memory_size_prop;
	unsigned int memory_size;
	unsigned int memory_start;
	int ret;

	memory_name_prop = of_get_flat_dt_prop(node,
				"qcom,memory-reservation-type",
				&memory_name_prop_length);
	memory_remove_prop = of_get_flat_dt_prop(node,
				"qcom,memblock-remove",
				&memory_remove_prop_length);

	if (memory_name_prop || memory_remove_prop) {
		if (!check_for_compat(node))
			goto out;
	} else {
		goto out;
	}

	if (memory_name_prop) {
		if (strnlen(memory_name_prop, memory_name_prop_length) == 0) {
			WARN(1, "Memory name was malformed\n");
			goto mem_remove;
		}

		memory_size_prop = of_get_flat_dt_prop(node,
				"qcom,memory-reservation-size",
				&memory_size_prop_length);

		if (memory_size_prop &&
		    (memory_size_prop_length == sizeof(unsigned int))) {
			memory_size = be32_to_cpu(*memory_size_prop);

			if (reserve_memory_type(memory_name_prop,
					data, memory_size) == 0)
				pr_info("%s reserved %s size %x\n",
					uname, memory_name_prop, memory_size);
			else
				WARN(1, "Node %s reserve failed\n", uname);
		} else {
			WARN(1, "Node %s specified bad/nonexistent size\n",
				uname);
		}
	}

mem_remove:

	if (memory_remove_prop) {
		if (memory_remove_prop_length != (2 * sizeof(unsigned int))) {
			WARN(1, "Memory remove malformed\n");
			goto out;
		}

		memory_start = be32_to_cpu(memory_remove_prop[0]);
		memory_size = be32_to_cpu(memory_remove_prop[1]);

		ret = memblock_remove(memory_start, memory_size);
		if (ret)
			WARN(1, "Failed to remove memory %x-%x\n",
				memory_start, memory_start + memory_size);
		else
			pr_info("Node %s removed memory %x-%x\n", uname,
				memory_start, memory_start + memory_size);
	}

out:
	return 0;
}

/*
 * Scan the device tree for "qcom,memblock-remove" entries on compatible
 * nodes and record the largest such hole below MAX_HOLE_ADDRESS in the
 * global memory hole bounds; every hole found is also folded into
 * meminfo via adjust_meminfo().
 */
int __init dt_scan_for_memory_hole(unsigned long node, const char *uname,
		int depth, void *data)
{
	unsigned int *memory_remove_prop;
	unsigned long memory_remove_prop_length;
	unsigned long hole_start;
	unsigned long hole_size;

	memory_remove_prop = of_get_flat_dt_prop(node,
				"qcom,memblock-remove",
				&memory_remove_prop_length);

	if (memory_remove_prop) {
		if (!check_for_compat(node))
			goto out;
	} else {
		goto out;
	}

	if (memory_remove_prop) {
		if (memory_remove_prop_length != (2 * sizeof(unsigned int))) {
			WARN(1, "Memory remove malformed\n");
			goto out;
		}

		hole_start = be32_to_cpu(memory_remove_prop[0]);
		hole_size = be32_to_cpu(memory_remove_prop[1]);

		if (hole_start + hole_size <= MAX_HOLE_ADDRESS) {
			if (memory_hole_start == 0 && memory_hole_end == 0) {
				memory_hole_start = hole_start;
				memory_hole_end = hole_start + hole_size;
			} else if ((memory_hole_end - memory_hole_start)
							<= hole_size) {
				memory_hole_start = hole_start;
				memory_hole_end = hole_start + hole_size;
			}
		}
		adjust_meminfo(hole_start, hole_size);
	}

out:
	return 0;
}

/*
 * Split the memory bank to reflect the hole, if present,
 * using the start and end of the memory hole.
 */
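/*
 * Worked example (numbers invented): a 1GB bank at 0x80000000 with a
 * 16MB hole at 0x90000000 becomes bank [0x80000000, 0x90000000) plus
 * bank [0x91000000, 0xC0000000); the second bank's size is the
 * original size minus the first bank's size minus the hole size.
 */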
void adjust_meminfo(unsigned long start, unsigned long size)
{
	int i;

	for (i = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[i];

		if (((start + size) <= (bank->start + bank->size)) &&
		    (start >= bank->start)) {
			memmove(bank + 1, bank,
				(meminfo.nr_banks - i) * sizeof(*bank));
			meminfo.nr_banks++;
			i++;

			bank->size = start - bank->start;
			bank[1].start = (start + size);
			bank[1].size -= (bank->size + size);
			bank[1].highmem = 0;
		}
	}
}

unsigned long get_ddr_size(void)
{
	unsigned int i;
	unsigned long ret = 0;

	for (i = 0; i < meminfo.nr_banks; i++)
		ret += meminfo.bank[i].size;

	return ret;
}

/* Provide a string that anonymous device tree allocations (those not
 * directly associated with any driver) can use for their "compatible"
 * field */
EXPORT_COMPAT("qcom,msm-contig-mem");