/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2012, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <../../mm/mm.h>
#include <linux/fmem.h>

void *strongly_ordered_page;
char strongly_ordered_mem[PAGE_SIZE*2-4];

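/*
 * Map one page of the reserved strongly_ordered_mem buffer at the fixed
 * virtual address MSM_STRONGLY_ORDERED_PAGE with the strongly-ordered
 * memory type (7x27 only; a no-op elsewhere). Safe to call repeatedly:
 * once strongly_ordered_page is set, later calls return immediately.
 */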
void map_page_strongly_ordered(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	long unsigned int phys;
	struct map_desc map;

	if (strongly_ordered_page)
		return;

	strongly_ordered_page = (void *)PFN_ALIGN((int)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map.pfn = __phys_to_pfn(phys);
	map.virtual = MSM_STRONGLY_ORDERED_PAGE;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE_STRONGLY_ORDERED;
	create_mapping(&map);

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
#endif
}
EXPORT_SYMBOL(map_page_strongly_ordered);

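/*
 * Perform a dummy write to the strongly-ordered page, mapping it first
 * if that has not happened yet. Must not be called from interrupt
 * context before the page has been mapped; that case is a BUG().
 * Used by flush_axi_bus_buffer() below.
 */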
void write_to_strongly_ordered_memory(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	if (!strongly_ordered_page) {
		if (!in_interrupt())
			map_page_strongly_ordered();
		else {
			printk(KERN_ALERT "Cannot map strongly ordered page in "
				"Interrupt Context\n");
			/* capture it here before the allocation fails later */
			BUG();
		}
	}
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
#endif
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);

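/*
 * Drain outstanding writes on 7x27: issue a data memory barrier
 * (CP15 c7, c10, 5) and then write to strongly-ordered memory so the
 * AXI bus write buffer is flushed before returning.
 */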
void flush_axi_bus_buffer(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	__asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
			    : : "r" (0) : "memory");
	write_to_strongly_ordered_memory();
#endif
}

#define CACHE_LINE_SIZE 32

/* These cache related routines make the assumption that the associated
 * physical memory is contiguous. They will operate on all (L1
 * and L2 if present) caches.
 */
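/*
 * CP15 operations used below (one cache line at a time, by MVA):
 *   c7, c14, 1 - clean and invalidate D-cache line
 *   c7, c10, 1 - clean D-cache line
 *   c7, c6, 1  - invalidate D-cache line
 * each loop is followed by c7, c10, 4 (data synchronization barrier /
 * drain write buffer) and c7, c5, 0 (invalidate entire I-cache).
 */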
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	unsigned long vaddr;

	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
		asm ("mcr p15, 0, %0, c7, c14, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
	outer_flush_range(pstart, pstart + length);
#endif
	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

	flush_axi_bus_buffer();
}

void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	unsigned long vaddr;

	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
		asm ("mcr p15, 0, %0, c7, c10, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
	outer_clean_range(pstart, pstart + length);
#endif
	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

	flush_axi_bus_buffer();
}

void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	unsigned long vaddr;

	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
	outer_inv_range(pstart, pstart + length);
#endif
	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));
	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));

	flush_axi_bus_buffer();
}

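/*
 * Allocate @size bytes of bootmem with the start address aligned to
 * @alignment. The slack of (alignment - PAGE_SIZE) reserved below only
 * makes sense for alignments of at least PAGE_SIZE.
 */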
void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
	void *unused_addr = NULL;
	unsigned long addr, tmp_size, unused_size;

	/* Allocate maximum size needed, see where it ends up.
	 * Then free it -- in this path there are no other allocators
	 * so we can depend on getting the same address back
	 * when we allocate a smaller piece that is aligned
	 * at the end (if necessary) and the piece we really want,
	 * then free the unused first piece.
	 */

	tmp_size = size + alignment - PAGE_SIZE;
	addr = (unsigned long)alloc_bootmem(tmp_size);
	free_bootmem(__pa(addr), tmp_size);

	unused_size = alignment - (addr % alignment);
	if (unused_size)
		unused_addr = alloc_bootmem(unused_size);

	addr = (unsigned long)alloc_bootmem(size);
	if (unused_size)
		free_bootmem(__pa(unused_addr), unused_size);

	return (void *)addr;
}

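/*
 * Hook, set elsewhere, that changes the power state of a physical
 * memory region. The wrappers below silently succeed (return 0) until
 * the hook has been registered.
 */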
int (*change_memory_power)(u64, u64, int);

int platform_physical_remove_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_DEEP_POWERDOWN);
}

int platform_physical_active_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_ACTIVE);
}

int platform_physical_low_power_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_SELF_REFRESH);
}

char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

struct reserve_info *reserve_info;

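/*
 * Return how many bytes of bank @mb lie below @unstable_limit (the
 * platform's low_unstable_address). A limit of 0 means the entire bank
 * counts as stable.
 */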
static unsigned long stable_size(struct membank *mb,
	unsigned long unstable_limit)
{
	unsigned long upper_limit = mb->start + mb->size;

	if (!unstable_limit)
		return mb->size;

	/* Check for 32 bit roll-over */
	if (upper_limit >= mb->start) {
		/* If we didn't roll over we can safely make the check below */
		if (upper_limit <= unstable_limit)
			return mb->size;
	}

	if (mb->start >= unstable_limit)
		return 0;
	return unstable_limit - mb->start;
}

/* stable size of all memory banks contiguous to and below this one */
static unsigned long total_stable_size(unsigned long bank)
{
	int i;
	struct membank *mb = &meminfo.bank[bank];
	int memtype = reserve_info->paddr_to_memtype(mb->start);
	unsigned long size;

	size = stable_size(mb, reserve_info->low_unstable_address);
	for (i = bank - 1, mb = &meminfo.bank[bank - 1]; i >= 0; i--, mb--) {
		if (mb->start + mb->size != (mb + 1)->start)
			break;
		if (reserve_info->paddr_to_memtype(mb->start) != memtype)
			break;
		size += stable_size(mb, reserve_info->low_unstable_address);
	}
	return size;
}

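/*
 * Set each memory type's reserve limit to the largest run of stable,
 * contiguous banks of that type. reserve_memory_for_mempools() relies
 * on this to guarantee that a suitable bank can always be found.
 */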
static void __init calculate_reserve_limits(void)
{
	int i;
	struct membank *mb;
	int memtype;
	struct memtype_reserve *mt;
	unsigned long size;

	for (i = 0, mb = &meminfo.bank[0]; i < meminfo.nr_banks; i++, mb++) {
		memtype = reserve_info->paddr_to_memtype(mb->start);
		if (memtype == MEMTYPE_NONE) {
			pr_warning("unknown memory type for bank at %lx\n",
				(long unsigned int)mb->start);
			continue;
		}
		mt = &reserve_info->memtype_reserve_table[memtype];
		size = total_stable_size(i);
		mt->limit = max(mt->limit, size);
	}
}

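/*
 * Round reserves that require 1MB alignment up to a section boundary
 * and clamp every reserve to the limit computed above.
 */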
static void __init adjust_reserve_sizes(void)
{
	int i;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
			mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
		if (mt->size > mt->limit) {
			pr_warning("%lx size for %s too large, setting to %lx\n",
				mt->size, memtype_name[i], mt->limit);
			mt->size = mt->limit;
		}
	}
}

static void __init reserve_memory_for_mempools(void)
{
	int i, memtype, membank_type;
	struct memtype_reserve *mt;
	struct membank *mb;
	int ret;
	unsigned long size;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
			continue;

		/* We know we will find memory bank(s) of the proper size
		 * as we have limited the size of the memory pool for
		 * each memory type to the largest total size of the memory
		 * banks which are contiguous and of the correct memory type.
		 * Choose the memory bank with the highest physical
		 * address which is large enough, so that we will not
		 * take memory from the lowest memory bank which the kernel
		 * is in (and cause boot problems) and so that we might
		 * be able to steal memory that would otherwise become
		 * highmem. However, do not use unstable memory.
		 */
		for (i = meminfo.nr_banks - 1; i >= 0; i--) {
			mb = &meminfo.bank[i];
			membank_type =
				reserve_info->paddr_to_memtype(mb->start);
			if (memtype != membank_type)
				continue;
			size = total_stable_size(i);
			if (size >= mt->size) {
				size = stable_size(mb,
					reserve_info->low_unstable_address);
				/* mt->size may be larger than size, all this
				 * means is that we are carving the memory pool
				 * out of multiple contiguous memory banks.
				 */
				mt->start = mb->start + (size - mt->size);
				ret = memblock_remove(mt->start, mt->size);
				BUG_ON(ret);
				break;
			}
		}
	}
}

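/*
 * Carve @fmem_size bytes for fmem out of the last memory bank, one page
 * below its top, and return the physical base address. Returns 0 when
 * no fmem memory was requested.
 */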
unsigned long __init reserve_memory_for_fmem(unsigned long fmem_size)
{
	struct membank *mb;
	int ret;
	unsigned long fmem_phys;

	if (!fmem_size)
		return 0;

	mb = &meminfo.bank[meminfo.nr_banks - 1];
	/*
	 * Placing fmem at the top of memory causes multimedia issues.
	 * Instead, place it 1 page below the top of memory to prevent
	 * the issues from occurring.
	 */
	fmem_phys = mb->start + (mb->size - fmem_size) - PAGE_SIZE;
	ret = memblock_remove(fmem_phys, fmem_size);
	BUG_ON(ret);

	pr_info("fmem start %lx size %lx\n", fmem_phys, fmem_size);
	return fmem_phys;
}

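/*
 * Create a mempool for every memory type that ended up with a non-zero
 * reservation; failures are logged but not fatal.
 */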
static void __init initialize_mempools(void)
{
	struct mem_pool *mpool;
	int memtype;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (!mt->size)
			continue;
		mpool = initialize_memory_pool(mt->start, mt->size, memtype);
		if (!mpool)
			pr_warning("failed to create %s mempool\n",
				memtype_name[memtype]);
	}
}

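/*
 * Top-level entry point for memory reservation: ask the platform for
 * its per-memtype sizes, compute and apply the limits, carve the
 * reservations out of memblock, then create the mempools.
 */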
void __init msm_reserve(void)
{
	memory_pool_init();
	reserve_info->calculate_reserve_sizes();
	calculate_reserve_limits();
	adjust_reserve_sizes();
	reserve_memory_for_mempools();
	initialize_mempools();
}

static int get_ebi_memtype(void)
{
	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		return MEMTYPE_EBI0;
	return MEMTYPE_EBI1;
}

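/*
 * Convenience wrappers that allocate physically contiguous memory from
 * whichever EBI pool get_ebi_memtype() selects for the running SoC.
 */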
void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	return allocate_contiguous_memory(size, get_ebi_memtype(),
		align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
		align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);

/* emulation of the deprecated pmem_kalloc and pmem_kfree */
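/*
 * A minimal usage sketch (hypothetical caller, sizes and flags chosen
 * for illustration only; errors come back as negative errnos):
 *
 *	int32_t paddr = pmem_kalloc(SZ_1M,
 *			PMEM_MEMTYPE_EBI1 | PMEM_ALIGNMENT_1M);
 *	if (paddr < 0)
 *		return paddr;
 *	...
 *	pmem_kfree(paddr);
 */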
int32_t pmem_kalloc(const size_t size, const uint32_t flags)
{
	int pmem_memtype;
	int memtype = MEMTYPE_NONE;
	int ebi1_memtype = MEMTYPE_EBI1;
	unsigned int align;
	int32_t paddr;

	switch (flags & PMEM_ALIGNMENT_MASK) {
	case PMEM_ALIGNMENT_4K:
		align = SZ_4K;
		break;
	case PMEM_ALIGNMENT_1M:
		align = SZ_1M;
		break;
	default:
		pr_alert("Invalid alignment %x\n",
			(flags & PMEM_ALIGNMENT_MASK));
		return -EINVAL;
	}

	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		ebi1_memtype = MEMTYPE_EBI0;

	pmem_memtype = flags & PMEM_MEMTYPE_MASK;
	if (pmem_memtype == PMEM_MEMTYPE_EBI1)
		memtype = ebi1_memtype;
	else if (pmem_memtype == PMEM_MEMTYPE_SMI)
		memtype = MEMTYPE_SMI_KERNEL;
	else {
		pr_alert("Invalid memory type %x\n",
			flags & PMEM_MEMTYPE_MASK);
		return -EINVAL;
	}

	paddr = _allocate_contiguous_memory_nomap(size, memtype, align,
		__builtin_return_address(0));

	if (!paddr && pmem_memtype == PMEM_MEMTYPE_SMI)
		paddr = _allocate_contiguous_memory_nomap(size,
			ebi1_memtype, align, __builtin_return_address(0));

	if (!paddr)
		return -ENOMEM;
	return paddr;
}
EXPORT_SYMBOL(pmem_kalloc);

int pmem_kfree(const int32_t physaddr)
{
	free_contiguous_memory_by_paddr(physaddr);

	return 0;
}
EXPORT_SYMBOL(pmem_kfree);

unsigned int msm_ttbr0;

void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}

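/*
 * Thin wrappers around fmem_set_state(): request moves the fmem region
 * into the C state, release moves it back to the T state. The unused
 * argument keeps the callback signature expected by their callers.
 */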
int request_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_C_STATE);
}

int release_fmem_c_region(void *unused)
{
	return fmem_set_state(FMEM_T_STATE);
}