/*
 * linux/arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/sort.h>
#include <linux/fs.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
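/*
 * Example: booting with "cachepolicy=writethrough" on the kernel
 * command line selects the write-through entry from cache_policies[]
 * above.  On ARMv6 and later the choice is overridden to writeback,
 * as enforced in early_cachepolicy().
 */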

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

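/*
 * adjust_cr() changes the given bits in the CP15 control register and
 * keeps the cached cr_alignment/cr_no_alignment values in step with
 * the hardware.  It is only built on UP: on SMP each core carries its
 * own copy of the control register, so a single global update like
 * this would not be safe.
 */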
#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	  /* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect	= PMD_TYPE_SECT,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG |
				  L_PTE_DIRTY | L_PTE_WRITE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte	= L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				  L_PTE_USER | L_PTE_EXEC,
		.prot_l1	= PMD_TYPE_TABLE,
		.domain		= DOMAIN_IO,
	},
};

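/*
 * get_mem_type() returns the mem_type descriptor for one of the MT_*
 * indices above, or NULL if the index is out of range.
 */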
const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
#ifdef CONFIG_SMP
	cachepolicy = CPOLICY_WRITEALLOC;
#endif

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_SMP
	/*
	 * Only use write-through for non-SMP systems
	 */
	if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
#endif

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3())
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		vecs_pgprot |= L_PTE_SHARED;
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
		mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
		mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
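/*
 * Cacheability policy for user-space mappings of physical memory,
 * for example via /dev/mem: pfns that are not valid RAM are mapped
 * strongly ordered, O_SYNC mappings become write-combining, and
 * everything else keeps the protections the vma asked for.
 */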
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

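/*
 * early_alloc() hands out zeroed, size-aligned blocks straight from
 * memblock.  It is only usable during early boot, before the core
 * page allocator is up.
 */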
static void __init *early_alloc(unsigned long sz)
{
	void *ptr = __va(memblock_alloc(sz, sz));
	memset(ptr, 0, sz);
	return ptr;
}

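/*
 * early_pte_alloc() populates an empty pmd with a new pte table and
 * returns the kernel pte for addr.  The allocation is twice
 * PTRS_PER_PTE entries, which on classic ARM covers the shadow
 * "Linux" pte tables kept alongside the hardware ones.
 */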
static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(2 * PTRS_PER_PTE * sizeof(pte_t));
		__pmd_populate(pmd, __pa(pte) | prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

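/*
 * alloc_init_pte() fills in the pte entries covering [addr, end),
 * starting at the given pfn and using the pte protections of the
 * supplied memory type.
 */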
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

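/*
 * alloc_init_section() maps one pgd's worth of address space, using
 * 1MB section entries when addr, end and phys are all section
 * aligned, and falling back to a pte table otherwise.
 */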
static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
				      unsigned long end, unsigned long phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pgd, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long phys, addr, length, end;
	pgd_t *pgd;

	addr = md->virtual;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for "
		       "0x%08llx at 0x%08lx invalid alignment\n",
		       __pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pmd_t *pmd = pmd_offset(pgd, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long phys, addr, length, end;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = (unsigned long)__pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_section(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}

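/*
 * iotable_init() is typically invoked from a machine's ->map_io hook
 * (called from devicemaps_init() below) to set up the static device
 * mappings a board needs before any drivers probe.
 */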

static void * __initdata vmalloc_min = (void *)(VMALLOC_END - SZ_128M);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes.  This can be used to increase (or decrease) the vmalloc
 * area - the default is 128MB.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
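/*
 * Example: booting with "vmalloc=256M" keeps 256MB of virtual space
 * below VMALLOC_END for vmalloc.  The value is clamped to at least
 * 16MB, and to leave at least 32MB of lowmem above PAGE_OFFSET.
 */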

phys_addr_t lowmem_end_addr;

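/*
 * sanity_check_meminfo() trims the registered memory banks against
 * the lowmem/vmalloc split: with CONFIG_HIGHMEM, banks straddling the
 * boundary are split and the upper parts marked highmem; without it,
 * banks beyond the boundary are truncated or dropped entirely.
 */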
static void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	lowmem_end_addr = __pa(vmalloc_min - 1) + 1;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) > vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (__va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
			       "(vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
			       "to -%.8lx (vmalloc region overlap).\n",
			       bank->start, bank->start + bank->size - 1,
			       bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
#ifdef CONFIG_SMP
		} else if (tlb_ops_need_broadcast()) {
			/*
			 * kmap_high needs to occasionally flush TLB entries,
			 * however, if the TLB entries need to be broadcast
			 * we may deadlock:
			 *  kmap_high(irqs off)->flush_all_zero_pkmaps->
			 *  flush_tlb_kernel_range->smp_call_function_many
			 *  (must not be called with irqs off)
			 */
			reason = "without hardware TLB ops broadcasting";
#endif
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
}

static inline void prepare_page_table(void)
{
	unsigned long addr;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the end of the vmalloc region.
	 */
	for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
	     addr < VMALLOC_END; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), PTRS_PER_PGD * sizeof(pgd_t));

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

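/*
 * map_memory_bank() wraps one membank in a map_desc and creates its
 * MT_MEMORY mapping; map_lowmem() below does this for every bank that
 * sanity_check_meminfo() did not mark as highmem.
 */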
static inline void map_memory_bank(struct membank *bank)
{
	struct map_desc map;

	map.pfn = bank_pfn_start(bank);
	map.virtual = __phys_to_virt(bank_phys_start(bank));
	map.length = bank_phys_size(bank);
	map.type = MT_MEMORY;

	create_mapping(&map);
}

static void __init map_lowmem(void)
{
	struct meminfo *mi = &meminfo;
	int i;

	/* Map all the lowmem memory banks. */
	for (i = 0; i < mi->nr_banks; i++) {
		struct membank *bank = &mi->bank[i];

		if (!bank->highmem)
			map_memory_bank(bank);
	}
}

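/*
 * meminfo_cmp() orders memory banks by start pfn; paging_init() uses
 * it to sort meminfo before the banks are checked and mapped.
 */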
static int __init meminfo_cmp(const void *_a, const void *_b)
{
	const struct membank *a = _a, *b = _b;
	long cmp = bank_pfn_start(a) - bank_pfn_start(b);
	return cmp < 0 ? -1 : cmp > 0 ? 1 : 0;
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	sort(&meminfo.bank, meminfo.nr_banks, sizeof(meminfo.bank[0]), meminfo_cmp, NULL);

	build_mem_type_table();
	sanity_check_meminfo();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	/*
	 * We need access to user-mode page tables here.  For kernel threads
	 * we don't have any user-mode mappings so we use the context that we
	 * "borrowed".
	 */
	pgd = current->active_mm->pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
		base_pmdval |= PMD_BIT4;

	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}

	local_flush_tlb_all();
}