/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
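
/*
 * Example (kernel command line): booting with
 *
 *	cachepolicy=writethrough
 *
 * selects CPOLICY_WRITETHROUGH from cache_policies[] above on pre-ARMv6
 * CPUs; ARMv6 and later are forced to writeback, as the warning in
 * early_cachepolicy() notes.
 */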

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
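/*
 * Adjust the control register bits selected by 'mask' to the value 'set',
 * keeping the cr_alignment/cr_no_alignment shadow copies in sync with the
 * hardware register.  The alignment bit (CR_A) is never touched here.
 */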
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
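
/*
 * Example: rather than hard-coding protection bits, code that builds its
 * own mappings can look up the attributes of a memory type.  A minimal
 * sketch (the surrounding pte/pfn handling is hypothetical):
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE);
 *
 *	if (mt)
 *		set_pte_ext(pte, pfn_pte(pfn, __pgprot(mt->prot_pte)), 0);
 */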

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * On ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 *   (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 *   (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 *   (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
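/*
 * Choose the pgprot for a userspace mapping of physical memory, e.g. an
 * mmap() of /dev/mem: non-RAM pfns are mapped strongly-ordered, RAM
 * opened with O_SYNC is mapped write-combining, and anything else keeps
 * the caller's cacheable protections.
 */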
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
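
/*
 * Worked example of the [35:32] shift above: a supersection at 36-bit
 * physical address 0x240000000 has md->pfn = 0x240000, so
 * (md->pfn >> 20) & 0xf = 0x2 lands in PMD bits [23:20], while the low
 * 32 bits of 'phys' (0x40000000) supply the rest of the entry.
 */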

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx overlaps vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}
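
/*
 * Typical use is from a machine's ->map_io() callback.  A minimal sketch
 * (board name, virtual address and physical address are hypothetical):
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xf8000000,
 *			.pfn		= __phys_to_pfn(0x10000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init board_map_io(void)
 *	{
 *		iotable_init(board_io_desc, ARRAY_SIZE(board_io_desc));
 *	}
 */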

static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size' bytes
 * (e.g. "vmalloc=256M").  This can be used to increase (or decrease)
 * the vmalloc area - the default is 128MB.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

static phys_addr_t lowmem_limit __initdata = 0;

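/*
 * Walk the meminfo bank array, trimming or splitting banks so that no
 * lowmem bank crosses into the vmalloc area: with CONFIG_HIGHMEM the
 * excess becomes a highmem bank, without it the excess is discarded.
 * Also records the highest lowmem physical address in lowmem_limit.
 */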
static void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (__va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
			lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	memblock_set_current_limit(lowmem_limit);
}

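/*
 * Remove page table entries left over from the boot assembly: everything
 * below the kernel image (and module area), and the kernel mappings from
 * the end of the first lowmem block up to VMALLOC_END.
 */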
static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= lowmem_limit)
		end = lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;

	/*
	 * Allocate the vector page early.
	 */
	vectors_page = early_alloc(PAGE_SIZE);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

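/*
 * Create MT_MEMORY section mappings for all lowmem memblock regions,
 * clamping each region to lowmem_limit.
 */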
static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	build_mem_type_table();
	sanity_check_meminfo();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}