/*
 * linux/arch/arm/mm/mmu.c
 *
 * Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>

#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/traps.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache or the cache and write buffer to be turned off.  (Note: the
 * write buffer should not be on while the cache is off.)
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
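
/*
 * Example (illustrative): booting with "cachepolicy=writethrough" selects
 * the CPOLICY_WRITETHROUGH entry from cache_policies[] on pre-ARMv6 CPUs;
 * on ARMv6 and later only the default write-back policy is accepted, as
 * enforced above.
 */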

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
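
/*
 * Usage note: "ecc=on" sets PMD_PROTECTION in ecc_mask, which is later
 * OR'ed into the L1 entries of the vectors and MT_MEMORY mappings in
 * build_mem_type_table(); that function clears ecc_mask again on
 * pre-ARMv5 CPUs, which cannot use it.
 */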

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif
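
/*
 * adjust_cr() above updates the saved cr_alignment/cr_no_alignment
 * copies and the live control register together with IRQs disabled, so
 * all three views stay consistent; CR_A is masked out because the
 * alignment bit is managed separately (see noalign_setup()).
 */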

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	  /* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
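
/*
 * Illustrative use of the table above: callers look up a memory type and
 * read its protection bits, e.g.
 *
 *	const struct mem_type *t = get_mem_type(MT_DEVICE);
 *	pteval_t prot = t ? t->prot_pte : 0;
 *
 * prot_pte holds the L2 (PTE) bits, prot_l1/prot_sect the L1 table and
 * section bits, and domain the ARM domain the mapping belongs to.
 */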

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * On ARMv5 and lower, bit 4 must be set for page tables (it was the
	 * cache "update-able on write" bit on ARM610).  However, XScale and
	 * XScale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif
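
/*
 * phys_mem_access_prot() is typically consulted when user space mmap()s
 * physical memory (e.g. via /dev/mem): anything that is not valid RAM is
 * mapped uncached, O_SYNC mappings of RAM become write-combining, and
 * everything else keeps the caller's protection.
 */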

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}
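
/*
 * Note on the allocation size above: on ARM each PTE page holds the
 * Linux view of the page table followed by the hardware table, so a
 * single early_alloc() of PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE bytes
 * covers both (see the PTE_HWTABLE_* definitions for the exact layout).
 */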

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (((addr | end | phys) & ~SECTION_MASK) == 0) {
		pmd_t *p = pmd;

		if (addr & SECTION_SIZE)
			pmd++;

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; PTEs aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}

static void alloc_init_pud(pgd_t *pgd, unsigned long addr, unsigned long end,
	unsigned long phys, const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
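
/*
 * A supersection maps 16MB at once; the loop above writes 16 identical
 * L1 entries because every L1 slot covering the 16MB range must carry a
 * copy of the supersection descriptor.
 */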

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}
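
/*
 * To summarise the dispatch above: mappings with pfn >= 0x100000 go via
 * create_36bit_mapping(), section-aligned ranges get section entries in
 * alloc_init_section(), and everything else falls back to individual L2
 * page tables via alloc_init_pte().
 */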

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP;
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}
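
/*
 * Illustrative only (names and addresses are made up): a machine's
 * map_io callback typically passes a table like this to iotable_init():
 *
 *	static struct map_desc example_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xf8000000,
 *			.pfn		= __phys_to_pfn(0x10000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 *	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
 */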

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
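
/*
 * Example (illustrative): booting with "vmalloc=256M" moves vmalloc_min
 * down so that 256MB of address space is kept for vmalloc/ioremap; the
 * clamping above bounds the request between 16MB and
 * VMALLOC_END - (PAGE_OFFSET + 32MB), so at least 32MB of lowmem remains
 * directly mapped.
 */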

static phys_addr_t lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split memory banks which partially overlap the vmalloc
		 * area; this greatly simplifies things later.
		 */
		if (__va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > lowmem_limit)
			lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(lowmem_limit - 1) + 1;
	memblock_set_current_limit(lowmem_limit);
}
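
/*
 * In short: sanity_check_meminfo() clamps or splits each memory bank
 * against vmalloc_min, marks banks above it as highmem, and derives
 * lowmem_limit/high_memory from the highest directly-mapped bank.
 */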

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= lowmem_limit)
		end = lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function: you cannot use any function or debugging method which
 * may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;

	/*
	 * Allocate the vector page early.
	 */
	vectors_page = early_alloc(PAGE_SIZE);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors_page));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
	map.type = MT_HIGH_VECTORS;
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}

static void __init map_lowmem(void)
{
	struct memblock_region *reg;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		phys_addr_t start = reg->base;
		phys_addr_t end = start + reg->size;
		struct map_desc map;

		if (end > lowmem_limit)
			end = lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
		map.length = end - start;
		map.type = MT_MEMORY;

		create_mapping(&map);
	}
}
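
/*
 * map_lowmem() creates the kernel's linear mapping: every lowmem
 * memblock region is mapped as MT_MEMORY, clamped to the lowmem_limit
 * computed in sanity_check_meminfo().
 */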

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}