/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache, or the cache and write buffer, to be turned off.  (Note: the
 * write buffer should not be on with the cache off.)
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
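
/*
 * Example (illustrative, not part of the original file): the cache policy
 * is chosen on the kernel command line, e.g. "cachepolicy=writethrough".
 * Any of the .policy strings in cache_policies[] is accepted, but on
 * ARMv6 and later the check above forces writeback, and is_smp() later
 * forces writealloc in build_mem_type_table().
 */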
Russell Kingae8f1542006-09-27 15:38:34 +0100135
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100136static int __init early_nocache(char *__unused)
Russell Kingae8f1542006-09-27 15:38:34 +0100137{
138 char *p = "buffered";
139 printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100140 early_cachepolicy(p);
141 return 0;
Russell Kingae8f1542006-09-27 15:38:34 +0100142}
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100143early_param("nocache", early_nocache);
Russell Kingae8f1542006-09-27 15:38:34 +0100144
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100145static int __init early_nowrite(char *__unused)
Russell Kingae8f1542006-09-27 15:38:34 +0100146{
147 char *p = "uncached";
148 printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100149 early_cachepolicy(p);
150 return 0;
Russell Kingae8f1542006-09-27 15:38:34 +0100151}
Jeremy Kerr2b0d8c22010-01-11 23:17:34 +0100152early_param("nowb", early_nowrite);
Russell Kingae8f1542006-09-27 15:38:34 +0100153
#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
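
/*
 * Example (illustrative, not part of the original file): callers such as
 * the ioremap code look up a type to build page protections, e.g.
 *
 *	const struct mem_type *mtype = get_mem_type(MT_DEVICE_WC);
 *
 *	if (mtype)
 *		prot = __pgprot(mtype->prot_pte);
 *
 * A NULL return means the type index is out of range.
 */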

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * On ARMv5 and lower, bit 4 must be set for page tables (it was
	 * the cache "update-able on write" bit on ARM610).  However,
	 * Xscale and Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
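
/*
 * Illustrative note (not part of the original file): this hook shapes
 * userspace mappings of /dev/mem.  A pfn outside RAM is always mapped
 * strongly uncached, an O_SYNC mapping of RAM is downgraded to
 * write-combining, and everything else keeps the cacheable protections
 * already present in vma_prot.
 */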

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}
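
/*
 * Illustrative note (not part of the original file): on classic
 * (non-LPAE) ARM the allocation above is one page holding both the
 * "Linux" view of the PTEs and, at PTE_HWTABLE_OFF, the hardware tables
 * the MMU actually walks; see arch/arm/include/asm/pgtable-2level.h for
 * the layout this relies on.
 */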

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE)
			pmd++;
#endif

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; PTEs aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}
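
/*
 * Worked example (illustrative): mapping 2MB at virtual 0xc0000000 from
 * physical 0x80000000 satisfies the alignment test above and writes two
 * consecutive 1MB section entries (0x80000000 and 0x80100000); an
 * unaligned request instead falls back entirely to alloc_init_pte() and
 * 4KB pages.
 */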

static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long phys, const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */
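
/*
 * Worked example (illustrative): with 4KB pages, pfn 0x140000 is
 * physical address 0x1_40000000, so bits [35:32] are 0x1; the shift
 * above moves them into bits [23:20] of each supersection descriptor,
 * where the ARMv6/XSC3 supersection format expects them.
 */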

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture-specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}
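
/*
 * Example (illustrative; all names and addresses are hypothetical): a
 * machine's ->map_io callback typically feeds a static table to
 * iotable_init(), e.g.
 *
 *	static struct map_desc foo_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xf8000000,
 *			.pfn		= __phys_to_pfn(0x10000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	static void __init foo_map_io(void)
 *	{
 *		iotable_init(foo_io_desc, ARRAY_SIZE(foo_io_desc));
 *	}
 */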

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240MB.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
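
/*
 * Example (illustrative): booting with "vmalloc=384M" moves vmalloc_min
 * down by a further 144MB, shrinking lowmem by the same amount; the
 * requested size is clamped to at least 16MB and to leave at least 32MB
 * of lowmem.
 */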

static phys_addr_t lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area, greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
			lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(lowmem_limit - 1) + 1;
	memblock_set_current_limit(lowmem_limit);
}
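
/*
 * Worked example (illustrative, assuming PAGE_OFFSET 0xc0000000 and
 * VMALLOC_END 0xff000000): the default 240MB vmalloc area plus the 8MB
 * VMALLOC_OFFSET puts vmalloc_min at 0xef800000, i.e. 760MB of lowmem.
 * A single 1GB bank is then split into a 760MB lowmem bank and a 264MB
 * highmem bank (or truncated if CONFIG_HIGHMEM is not set).
 */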

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= lowmem_limit)
		end = lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif
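
/*
 * Illustrative arithmetic (not part of the original file): with the
 * classic 2-level layout, PTRS_PER_PGD is 2048 and a pgd_t is a pair of
 * 32-bit entries, giving a 16KB swapper_pg_dir; with LPAE it is one 4KB
 * page for the pgd plus 4 * 512 * 8 bytes of pmd tables, 20KB in total.
 */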

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;

	/*
	 * Allocate the vector page early.
	 */
	vectors_page = early_alloc(PAGE_SIZE);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}
Russell Kingd111e8f2006-09-27 15:27:33 +01001129}