/* arch/arm/mach-msm/memory.c
 *
 * Copyright (C) 2007 Google, Inc.
 * Copyright (c) 2009-2011, Code Aurora Forum. All rights reserved.
 *
 * This software is licensed under the terms of the GNU General Public
 * License version 2, as published by the Free Software Foundation, and
 * may be copied, distributed, and modified under those terms.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 */

#include <linux/mm.h>
#include <linux/mm_types.h>
#include <linux/bootmem.h>
#include <linux/module.h>
#include <linux/memory_alloc.h>
#include <linux/memblock.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/mach/map.h>
#include <asm/cacheflush.h>
#include <asm/setup.h>
#include <asm/mach-types.h>
#include <mach/msm_memtypes.h>
#include <linux/hardirq.h>
#if defined(CONFIG_MSM_NPA_REMOTE)
#include "npa_remote.h"
#include <linux/completion.h>
#include <linux/err.h>
#endif
#include <linux/android_pmem.h>
#include <mach/msm_iomap.h>
#include <mach/socinfo.h>
#include <../../mm/mm.h>

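/*
 * Apply device memory attributes to userspace remappings that target the
 * hard-coded 0x88000000-0xD0000000 physical window (presumably the MSM
 * peripheral/register space).
 */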
int arch_io_remap_pfn_range(struct vm_area_struct *vma, unsigned long addr,
			    unsigned long pfn, unsigned long size, pgprot_t prot)
{
	unsigned long pfn_addr = pfn << PAGE_SHIFT;

	if ((pfn_addr >= 0x88000000) && (pfn_addr < 0xD0000000)) {
		prot = pgprot_device(prot);
		pr_debug("remapping device %lx\n", pgprot_val(prot));
	}
	return remap_pfn_range(vma, addr, pfn, size, prot);
}

void *strongly_ordered_page;
char strongly_ordered_mem[PAGE_SIZE*2-4];

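/*
 * On MSM7x27 a page of ordinary RAM is mapped strongly ordered at
 * MSM_STRONGLY_ORDERED_PAGE; writes to it are used below to drain the
 * AXI bus write buffer.
 */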
void map_page_strongly_ordered(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	unsigned long phys;
	struct map_desc map;

	if (strongly_ordered_page)
		return;

	strongly_ordered_page = (void *)PFN_ALIGN((int)&strongly_ordered_mem);
	phys = __pa(strongly_ordered_page);

	map.pfn = __phys_to_pfn(phys);
	map.virtual = MSM_STRONGLY_ORDERED_PAGE;
	map.length = PAGE_SIZE;
	map.type = MT_DEVICE_STRONGLY_ORDERED;
	create_mapping(&map);

	printk(KERN_ALERT "Initialized strongly ordered page successfully\n");
#endif
}
EXPORT_SYMBOL(map_page_strongly_ordered);

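/*
 * Touch the strongly ordered page to force outstanding bufferable writes
 * out to memory.  If the page is not mapped yet it is mapped here, which
 * cannot be done from interrupt context (hence the BUG()).
 */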
void write_to_strongly_ordered_memory(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	if (!strongly_ordered_page) {
		if (!in_interrupt())
			map_page_strongly_ordered();
		else {
			printk(KERN_ALERT "Cannot map strongly ordered page in "
				"interrupt context\n");
			/* capture it here before the allocation fails later */
			BUG();
		}
	}
	*(int *)MSM_STRONGLY_ORDERED_PAGE = 0;
#endif
}
EXPORT_SYMBOL(write_to_strongly_ordered_memory);

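/*
 * Drain writes pending on the AXI bus: a data memory barrier followed by
 * a write to the strongly ordered page.
 */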
void flush_axi_bus_buffer(void)
{
#if defined(CONFIG_ARCH_MSM7X27) && !defined(CONFIG_ARCH_MSM7X27A)
	/* data memory barrier */
	__asm__ __volatile__ ("mcr p15, 0, %0, c7, c10, 5" \
				: : "r" (0) : "memory");
	write_to_strongly_ordered_memory();
#endif
}

#define CACHE_LINE_SIZE 32

/* These cache related routines make the assumption that the associated
 * physical memory is contiguous. They will operate on all (L1
 * and L2 if present) caches.
 */
void clean_and_invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	unsigned long vaddr;

	/* clean and invalidate D-cache line by MVA */
	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
		asm ("mcr p15, 0, %0, c7, c14, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
	outer_flush_range(pstart, pstart + length);
#endif
	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));	/* data sync barrier */
	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));	/* invalidate I-cache */

	flush_axi_bus_buffer();
}

void clean_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	unsigned long vaddr;

	/* clean D-cache line by MVA */
	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
		asm ("mcr p15, 0, %0, c7, c10, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
	outer_clean_range(pstart, pstart + length);
#endif
	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));	/* data sync barrier */
	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));	/* invalidate I-cache */

	flush_axi_bus_buffer();
}

void invalidate_caches(unsigned long vstart,
	unsigned long length, unsigned long pstart)
{
	unsigned long vaddr;

	/* invalidate D-cache line by MVA */
	for (vaddr = vstart; vaddr < vstart + length; vaddr += CACHE_LINE_SIZE)
		asm ("mcr p15, 0, %0, c7, c6, 1" : : "r" (vaddr));
#ifdef CONFIG_OUTER_CACHE
	outer_inv_range(pstart, pstart + length);
#endif
	asm ("mcr p15, 0, %0, c7, c10, 4" : : "r" (0));	/* data sync barrier */
	asm ("mcr p15, 0, %0, c7, c5, 0" : : "r" (0));	/* invalidate I-cache */

	flush_axi_bus_buffer();
}

void *alloc_bootmem_aligned(unsigned long size, unsigned long alignment)
{
	void *unused_addr = NULL;
	unsigned long addr, tmp_size, unused_size;

	/* Allocate maximum size needed, see where it ends up.
	 * Then free it -- in this path there are no other allocators
	 * so we can depend on getting the same address back
	 * when we allocate a smaller piece that is aligned
	 * at the end (if necessary) and the piece we really want,
	 * then free the unused first piece.
	 */

	tmp_size = size + alignment - PAGE_SIZE;
	addr = (unsigned long)alloc_bootmem(tmp_size);
	free_bootmem(__pa(addr), tmp_size);

	unused_size = alignment - (addr % alignment);
	if (unused_size)
		unused_addr = alloc_bootmem(unused_size);

	addr = (unsigned long)alloc_bootmem(size);
	if (unused_size)
		free_bootmem(__pa(unused_addr), unused_size);

	return (void *)addr;
}

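/*
 * Optional hook (presumably registered by platform code) for changing the
 * power state of a physical memory region.  When it is not set, the
 * wrappers below simply return 0.
 */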
int (*change_memory_power)(u64, u64, int);

int platform_physical_remove_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_DEEP_POWERDOWN);
}

int platform_physical_active_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_ACTIVE);
}

int platform_physical_low_power_pages(u64 start, u64 size)
{
	if (!change_memory_power)
		return 0;
	return change_memory_power(start, size, MEMORY_SELF_REFRESH);
}

char *memtype_name[] = {
	"SMI_KERNEL",
	"SMI",
	"EBI0",
	"EBI1"
};

struct reserve_info *reserve_info;

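/*
 * Return the number of bytes of the bank that lie below the "unstable"
 * limit, i.e. memory that is presumably subject to later removal or
 * power-down.  A limit of 0 means all memory is considered stable.
 */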
static unsigned long stable_size(struct membank *mb,
	unsigned long unstable_limit)
{
	unsigned long upper_limit = mb->start + mb->size;

	if (!unstable_limit)
		return mb->size;

	/* Check for 32 bit roll-over */
	if (upper_limit >= mb->start) {
		/* If we didn't roll over we can safely make the check below */
		if (upper_limit <= unstable_limit)
			return mb->size;
	}

	if (mb->start >= unstable_limit)
		return 0;
	return unstable_limit - mb->start;
}

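/*
 * Cap each memory type's reservable size at the largest stable bank of
 * that type present in meminfo.
 */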
static void __init calculate_reserve_limits(void)
{
	int i;
	struct membank *mb;
	int memtype;
	struct memtype_reserve *mt;
	unsigned long size;

	for (i = 0, mb = &meminfo.bank[0]; i < meminfo.nr_banks; i++, mb++) {
		memtype = reserve_info->paddr_to_memtype(mb->start);
		if (memtype == MEMTYPE_NONE) {
			pr_warning("unknown memory type for bank at %lx\n",
				(unsigned long)mb->start);
			continue;
		}
		mt = &reserve_info->memtype_reserve_table[memtype];
		size = stable_size(mb, reserve_info->low_unstable_address);
		mt->limit = max(mt->limit, size);
	}
}

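/*
 * Round reservation sizes up to a 1MB section where requested and clamp
 * them to the per-type limits computed above.
 */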
static void __init adjust_reserve_sizes(void)
{
	int i;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (i = 0; i < MEMTYPE_MAX; i++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_1M_ALIGN)
			mt->size = (mt->size + SECTION_SIZE - 1) & SECTION_MASK;
		if (mt->size > mt->limit) {
			pr_warning("%lx size for %s too large, setting to %lx\n",
				mt->size, memtype_name[i], mt->limit);
			mt->size = mt->limit;
		}
	}
}

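/*
 * Carve each memory type's reservation out of the highest-addressed
 * stable bank that can hold it, and remove the region from memblock so
 * it is no longer treated as ordinary system RAM.
 */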
static void __init reserve_memory_for_mempools(void)
{
	int i, memtype, membank_type;
	struct memtype_reserve *mt;
	struct membank *mb;
	int ret;
	unsigned long size;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (mt->flags & MEMTYPE_FLAGS_FIXED || !mt->size)
			continue;

		/* We know we will find a memory bank of the proper size
		 * as we have limited the size of the memory pool for
		 * each memory type to the size of the largest memory
		 * bank. Choose the memory bank with the highest physical
		 * address which is large enough, so that we will not
		 * take memory from the lowest memory bank which the kernel
		 * is in (and cause boot problems) and so that we might
		 * be able to steal memory that would otherwise become
		 * highmem. However, do not use unstable memory.
		 */
		for (i = meminfo.nr_banks - 1; i >= 0; i--) {
			mb = &meminfo.bank[i];
			membank_type =
				reserve_info->paddr_to_memtype(mb->start);
			if (memtype != membank_type)
				continue;
			size = stable_size(mb,
				reserve_info->low_unstable_address);
			if (size >= mt->size) {
				mt->start = mb->start + (size - mt->size);
				ret = memblock_remove(mt->start, mt->size);
				BUG_ON(ret);
				break;
			}
		}
	}
}

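/*
 * Hand each reserved region to the memory_alloc pool allocator, one pool
 * per memory type.
 */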
static void __init initialize_mempools(void)
{
	struct mem_pool *mpool;
	int memtype;
	struct memtype_reserve *mt;

	mt = &reserve_info->memtype_reserve_table[0];
	for (memtype = 0; memtype < MEMTYPE_MAX; memtype++, mt++) {
		if (!mt->size)
			continue;
		mpool = initialize_memory_pool(mt->start, mt->size, memtype);
		if (!mpool)
			pr_warning("failed to create %s mempool\n",
				memtype_name[memtype]);
	}
}

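/*
 * Top-level reservation entry point, expected to be called from the
 * board's reserve callback: size, place and create the per-memtype pools
 * described by reserve_info.
 */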
void __init msm_reserve(void)
{
	memory_pool_init();
	reserve_info->calculate_reserve_sizes();
	calculate_reserve_limits();
	adjust_reserve_sizes();
	reserve_memory_for_mempools();
	initialize_mempools();
}

static int get_ebi_memtype(void)
{
	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		return MEMTYPE_EBI0;
	return MEMTYPE_EBI1;
}

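/*
 * Convenience wrappers that allocate physically contiguous memory from
 * the appropriate EBI pool (EBI0 on 7x30/8x55, EBI1 otherwise).
 */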
void *allocate_contiguous_ebi(unsigned long size,
	unsigned long align, int cached)
{
	return allocate_contiguous_memory(size, get_ebi_memtype(),
		align, cached);
}
EXPORT_SYMBOL(allocate_contiguous_ebi);

unsigned long allocate_contiguous_ebi_nomap(unsigned long size,
	unsigned long align)
{
	return _allocate_contiguous_memory_nomap(size, get_ebi_memtype(),
		align, __builtin_return_address(0));
}
EXPORT_SYMBOL(allocate_contiguous_ebi_nomap);

/* emulation of the deprecated pmem_kalloc and pmem_kfree */
int32_t pmem_kalloc(const size_t size, const uint32_t flags)
{
	int pmem_memtype;
	int memtype = MEMTYPE_NONE;
	int ebi1_memtype = MEMTYPE_EBI1;
	unsigned int align;
	int32_t paddr;

	switch (flags & PMEM_ALIGNMENT_MASK) {
	case PMEM_ALIGNMENT_4K:
		align = SZ_4K;
		break;
	case PMEM_ALIGNMENT_1M:
		align = SZ_1M;
		break;
	default:
		pr_alert("Invalid alignment %x\n",
			(flags & PMEM_ALIGNMENT_MASK));
		return -EINVAL;
	}

	/* on 7x30 and 8x55 "EBI1 kernel PMEM" is really on EBI0 */
	if (cpu_is_msm7x30() || cpu_is_msm8x55())
		ebi1_memtype = MEMTYPE_EBI0;

	pmem_memtype = flags & PMEM_MEMTYPE_MASK;
	if (pmem_memtype == PMEM_MEMTYPE_EBI1)
		memtype = ebi1_memtype;
	else if (pmem_memtype == PMEM_MEMTYPE_SMI)
		memtype = MEMTYPE_SMI_KERNEL;
	else {
		pr_alert("Invalid memory type %x\n",
			flags & PMEM_MEMTYPE_MASK);
		return -EINVAL;
	}

	paddr = _allocate_contiguous_memory_nomap(size, memtype, align,
		__builtin_return_address(0));

	/* if the SMI allocation failed, fall back to EBI */
	if (!paddr && pmem_memtype == PMEM_MEMTYPE_SMI)
		paddr = _allocate_contiguous_memory_nomap(size,
			ebi1_memtype, align, __builtin_return_address(0));

	if (!paddr)
		return -ENOMEM;
	return paddr;
}
EXPORT_SYMBOL(pmem_kalloc);

int pmem_kfree(const int32_t physaddr)
{
	free_contiguous_memory_by_paddr(physaddr);

	return 0;
}
EXPORT_SYMBOL(pmem_kfree);

unsigned int msm_ttbr0;

void store_ttbr0(void)
{
	/* Store TTBR0 for post-mortem debugging purposes. */
	asm("mrc p15, 0, %0, c2, c0, 0\n"
		: "=r" (msm_ttbr0));
}