/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/mmu_writeable.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <asm/user_accessible_timer.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

#define RX_AREA_START		_text
#define RX_AREA_END		__start_rodata

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We can not
	 * change these attributes once the initial assembly has setup the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);
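
/*
 * For example (illustrative): booting with "cachepolicy=writethrough" on the
 * kernel command line selects the "writethrough" entry of cache_policies[]
 * above; on ARMv6 and later the policy is forced back to writeback, as
 * early_cachepolicy() warns.
 */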

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_R] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RX] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_DEVICE_USER_ACCESSIBLE] = {
		.prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				L_PTE_SHARED | L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
		.domain    = DOMAIN_IO,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);
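
/*
 * Illustrative use (hypothetical caller): code that needs the protection
 * bits for a mapping class can do
 *
 *	const struct mem_type *mt = get_mem_type(MT_DEVICE_NONSHARED);
 *	if (mt)
 *		prot = mt->prot_pte;
 *
 * an out-of-range type simply returns NULL.
 */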

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd)
{
	if (pmd_none(*pmd) || pmd_bad(*pmd))
		return early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
	return pmd_page_vaddr(*pmd);
}

static void __init early_pte_install(pmd_t *pmd, pte_t *pte, unsigned long prot)
{
	__pmd_populate(pmd, __pa(pte), prot);
	BUG_ON(pmd_bad(*pmd));
}

#ifdef CONFIG_HIGHMEM
static pte_t * __init early_pte_alloc_and_install(pmd_t *pmd,
	unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_pte_alloc(pmd);
		early_pte_install(pmd, pte, prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}
#endif

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *start_pte = early_pte_alloc(pmd);
	pte_t *pte = start_pte + pte_index(addr);

	/* If replacing a section mapping, the whole section must be replaced */
	BUG_ON(pmd_bad(*pmd) && ((addr | end) & ~PMD_MASK));

	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
	early_pte_install(pmd, start_pte, type->prot_l1);
}

static void __init alloc_init_section(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type,
				      bool force_pages)
{
	pmd_t *pmd = pmd_offset(pud, addr);

	/*
	 * Try a section mapping - end, addr and phys must all be aligned
	 * to a section boundary.  Note that PMDs refer to the individual
	 * L1 entries, whereas PGDs refer to a group of L1 entries making
	 * up one logical pointer to an L2 table.
	 */
	if (type->prot_sect && ((addr | end | phys) & ~SECTION_MASK) == 0 && !force_pages) {
		pmd_t *p = pmd;

#ifndef CONFIG_ARM_LPAE
		if (addr & SECTION_SIZE)
			pmd++;
#endif

		do {
			*pmd = __pmd(phys | type->prot_sect);
			phys += SECTION_SIZE;
		} while (pmd++, addr += SECTION_SIZE, addr != end);

		flush_pmd_entry(p);
	} else {
		/*
		 * No need to loop; pte's aren't interested in the
		 * individual L1 entries.
		 */
		alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
	}
}
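
/*
 * Worked example (assuming 1MB sections): a region that starts and ends on
 * 1MB boundaries and is backed by 1MB-aligned physical memory is written
 * directly as section entries in the loop above, while an unaligned region
 * (or one with force_pages set) falls back to alloc_init_pte() and a
 * second-level page table.
 */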

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type,
	bool force_pages)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_section(pud, addr, next, phys, type, force_pages);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
	const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
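
	/*
	 * Worked example (illustrative numbers): md->pfn == 0x110000 names
	 * the 36-bit physical address 0x110000000; pfn >> 20 == 0x1, so the
	 * line above ORs 0x1 << 20 into the descriptor, placing physical
	 * address bits [35:32] into PMD bits [23:20] as required.
	 */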

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md, bool force_pages)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if ((md->virtual != vectors_base() &&
		md->virtual != get_user_accessible_timers_base()) &&
		md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type, force_pages);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md, false);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		vm_area_add_early(vm++);
	}
}
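
/*
 * Illustrative use (hypothetical addresses): a machine's map_io() hook
 * usually declares its static device mappings as
 *
 *	static struct map_desc board_io_desc[] __initdata = {
 *		{
 *			.virtual	= 0xfa000000,
 *			.pfn		= __phys_to_pfn(0x10000000),
 *			.length		= SZ_1M,
 *			.type		= MT_DEVICE,
 *		},
 *	};
 *
 * and passes them to iotable_init(), which builds the page table entries
 * via create_mapping() and records a vm_struct describing each region.
 */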

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */
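
/*
 * Example of the problem (assuming 1MB sections): a single 1MB static
 * mapping occupying only one half of a 2MB PMD leaves the other section
 * entry unused; the dummy vm entries added below keep vmalloc()/ioremap()
 * from handing out that virtual megabyte and colliding with the
 * half-initialized PMD.
 */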

static void __init pmd_empty_section_gap(unsigned long addr)
{
	struct vm_struct *vm;

	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
	vm->size = SECTION_SIZE;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = pmd_empty_section_gap;
	vm_area_add_early(vm);
}

static void __init fill_pmd_gaps(void)
{
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
			continue;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);
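
/*
 * Example (illustrative): "vmalloc=256M" on the command line makes the
 * vmalloc area 256MB; requests below 16MB, or so large that less than
 * 32MB of lowmem would remain, are clamped by the checks above.
 */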

phys_addr_t arm_lowmem_limit __initdata = 0;

void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
	find_membank0_hole();
#endif

#if (defined CONFIG_HIGHMEM) && (defined CONFIG_FIX_MOVABLE_ZONE)
	if (movable_reserved_size && __pa(vmalloc_min) > movable_reserved_start)
		vmalloc_min = __va(movable_reserved_start);
#endif
	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
			arm_lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(arm_lowmem_limit - 1) + 1;
	memblock_set_current_limit(arm_lowmem_limit);
}

static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif
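
/*
 * With the usual classic (non-LPAE) values of PTRS_PER_PGD == 2048 and an
 * 8-byte pgd_t this works out to the familiar 16KB swapper page directory;
 * the LPAE variant adds one extra page reserved for the top-level pgd.
 */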

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
#ifdef CONFIG_XIP_KERNEL
	map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
	map.virtual = MODULES_VADDR;
	map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
	map.type = MT_ROM;
	create_mapping(&map, false);
#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map, false);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map, false);
#endif
1192
1193 /*
1194 * Create a mapping for the machine vectors at the high-vectors
1195 * location (0xffff0000). If we aren't using high-vectors, also
1196 * create a mapping at the low-vectors virtual address.
1197 */
Russell King94e5a852012-01-18 15:32:49 +00001198 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
Russell Kingd111e8f2006-09-27 15:27:33 +01001199 map.virtual = 0xffff0000;
1200 map.length = PAGE_SIZE;
1201 map.type = MT_HIGH_VECTORS;
Colin Crosse5e483d2011-08-11 17:15:24 -07001202 create_mapping(&map, false);
Russell Kingd111e8f2006-09-27 15:27:33 +01001203
1204 if (!vectors_high()) {
1205 map.virtual = 0;
1206 map.type = MT_LOW_VECTORS;
Colin Crosse5e483d2011-08-11 17:15:24 -07001207 create_mapping(&map, false);
Russell Kingd111e8f2006-09-27 15:27:33 +01001208 }
1209
1210 /*
1211 * Ask the machine support to map in the statically mapped devices.
1212 */
1213 if (mdesc->map_io)
1214 mdesc->map_io();
Nicolas Pitreb0884a92012-06-27 17:28:57 +01001215 fill_pmd_gaps();
Russell Kingd111e8f2006-09-27 15:27:33 +01001216
Greg Reidcf105492012-10-12 12:14:12 -04001217 if (use_user_accessible_timers()) {
1218 /*
1219 * Generate a mapping for the timer page.
1220 */
1221 int page_addr = get_timer_page_address();
1222 if (page_addr != ARM_USER_ACCESSIBLE_TIMERS_INVALID_PAGE) {
1223 map.pfn = __phys_to_pfn(page_addr);
1224 map.virtual = CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE;
1225 map.length = PAGE_SIZE;
1226 map.type = MT_DEVICE_USER_ACCESSIBLE;
1227 create_mapping(&map, false);
1228 }
1229 }
1230
Russell Kingd111e8f2006-09-27 15:27:33 +01001231 /*
1232 * Finally flush the caches and tlb to ensure that we're in a
1233 * consistent state wrt the writebuffer. This also ensures that
1234 * any write-allocated cache lines in the vector page are written
1235 * back. After this point, we can start to touch devices again.
1236 */
1237 local_flush_tlb_all();
1238 flush_cache_all();
1239}
1240
Nicolas Pitred73cd422008-09-15 16:44:55 -04001241static void __init kmap_init(void)
1242{
1243#ifdef CONFIG_HIGHMEM
Colin Crosse5e483d2011-08-11 17:15:24 -07001244 pkmap_page_table = early_pte_alloc_and_install(pmd_off_k(PKMAP_BASE),
Russell King4bb2e272010-07-01 18:33:29 +01001245 PKMAP_BASE, _PAGE_KERNEL_TABLE);
Nicolas Pitred73cd422008-09-15 16:44:55 -04001246#endif
1247}
1248
Neil Leederf06ab972011-10-25 17:57:26 -04001249#ifdef CONFIG_STRICT_MEMORY_RWX
1250static struct {
1251 pmd_t *pmd_to_flush;
1252 pmd_t *pmd;
1253 unsigned long addr;
1254 pmd_t saved_pmd;
1255 bool made_writeable;
1256} mem_unprotect;
1257
1258static DEFINE_SPINLOCK(mem_text_writeable_lock);
1259
1260void mem_text_writeable_spinlock(unsigned long *flags)
1261{
1262 spin_lock_irqsave(&mem_text_writeable_lock, *flags);
1263}
1264
1265void mem_text_writeable_spinunlock(unsigned long *flags)
1266{
1267 spin_unlock_irqrestore(&mem_text_writeable_lock, *flags);
1268}
1269
1270/*
1271 * mem_text_address_writeable() and mem_text_address_restore()
1272 * should be called as a pair. They are used to make the
1273 * specified address in the kernel text section temporarily writeable
1274 * when it has been marked read-only by STRICT_MEMORY_RWX.
1275 * Used by kprobes and other debugging tools to set breakpoints etc.
1276 * mem_text_address_writeable() is invoked before writing.
1277 * After the write, mem_text_address_restore() must be called
1278 * to restore the original state.
                                            1279 * This is only effective when used on the kernel text section
                                            1280 * mapped as MT_MEMORY_RX by map_lowmem().
1281 *
1282 * They must each be called with mem_text_writeable_lock locked
1283 * by the caller, with no unlocking between the calls.
1284 * The caller should release mem_text_writeable_lock immediately
1285 * after the call to mem_text_address_restore().
1286 * Only the write and associated cache operations should be performed
1287 * between the calls.
1288 */
1289
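/*
 * A minimal sketch of the expected call sequence (illustrative only:
 * "insn_addr" and "new_insn" are hypothetical names; the in-tree user of
 * this pattern is mem_text_write_kernel_word() below):
 *
 *	unsigned long flags;
 *
 *	mem_text_writeable_spinlock(&flags);
 *	mem_text_address_writeable((unsigned long)insn_addr);
 *	*insn_addr = new_insn;
 *	flush_icache_range((unsigned long)insn_addr,
 *			   (unsigned long)insn_addr + sizeof(*insn_addr));
 *	mem_text_address_restore();
 *	mem_text_writeable_spinunlock(&flags);
 */
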
1290/* this function must be called with mem_text_writeable_lock held */
1291void mem_text_address_writeable(unsigned long addr)
1292{
1293 struct task_struct *tsk = current;
1294 struct mm_struct *mm = tsk->active_mm;
1295 pgd_t *pgd = pgd_offset(mm, addr);
1296 pud_t *pud = pud_offset(pgd, addr);
1297
1298 mem_unprotect.made_writeable = 0;
1299
1300 if ((addr < (unsigned long)RX_AREA_START) ||
1301 (addr >= (unsigned long)RX_AREA_END))
1302 return;
1303
1304 mem_unprotect.pmd = pmd_offset(pud, addr);
1305 mem_unprotect.pmd_to_flush = mem_unprotect.pmd;
1306 mem_unprotect.addr = addr & PAGE_MASK;
1307
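	/*
	 * On the classic 2-level ARM page tables assumed here, each Linux
	 * pmd_t covers two 1MB hardware sections; step to the second entry
	 * when the address lies in the upper half of the 2MB range.
	 */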
1308 if (addr & SECTION_SIZE)
1309 mem_unprotect.pmd++;
1310
1311 mem_unprotect.saved_pmd = *mem_unprotect.pmd;
1312 if ((mem_unprotect.saved_pmd & PMD_TYPE_MASK) != PMD_TYPE_SECT)
1313 return;
1314
1315 *mem_unprotect.pmd &= ~PMD_SECT_APX;
1316
1317 flush_pmd_entry(mem_unprotect.pmd_to_flush);
1318 flush_tlb_kernel_page(mem_unprotect.addr);
1319 mem_unprotect.made_writeable = 1;
1320}
1321
1322/* this function must be called with mem_text_writeable_lock held */
1323void mem_text_address_restore(void)
1324{
1325 if (mem_unprotect.made_writeable) {
1326 *mem_unprotect.pmd = mem_unprotect.saved_pmd;
1327 flush_pmd_entry(mem_unprotect.pmd_to_flush);
1328 flush_tlb_kernel_page(mem_unprotect.addr);
1329 }
1330}
1331#endif
1332
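/*
 * Convenience wrapper that patches a single word of read-only kernel text.
 * A usage sketch ("break_addr" and "BRKPT_INSN" are hypothetical names a
 * debugger might use):
 *
 *	mem_text_write_kernel_word(break_addr, BRKPT_INSN);
 *
 * The wrapper takes care of the locking, the temporary permission change
 * and the cache maintenance described above.
 */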
Neil Leeder32942752011-11-07 10:56:46 -05001333void mem_text_write_kernel_word(unsigned long *addr, unsigned long word)
1334{
1335 unsigned long flags;
1336
1337 mem_text_writeable_spinlock(&flags);
1338 mem_text_address_writeable((unsigned long)addr);
1339 *addr = word;
1340 flush_icache_range((unsigned long)addr,
1341 ((unsigned long)addr + sizeof(long)));
1342 mem_text_address_restore();
1343 mem_text_writeable_spinunlock(&flags);
1344}
1345EXPORT_SYMBOL(mem_text_write_kernel_word);
1346
Steve Mucklef132c6c2012-06-06 18:30:57 -07001347extern char __init_data[];
Colin Crosse5e483d2011-08-11 17:15:24 -07001348
Russell Kinga2227122010-03-25 18:56:05 +00001349static void __init map_lowmem(void)
1350{
Russell King8df65162010-10-27 19:57:38 +01001351 struct memblock_region *reg;
Colin Crosse5e483d2011-08-11 17:15:24 -07001352 phys_addr_t start;
1353 phys_addr_t end;
1354 struct map_desc map;
Russell Kinga2227122010-03-25 18:56:05 +00001355
1356 /* Map all the lowmem memory banks. */
Russell King8df65162010-10-27 19:57:38 +01001357 for_each_memblock(memory, reg) {
Colin Crosse5e483d2011-08-11 17:15:24 -07001358 start = reg->base;
1359 end = start + reg->size;
Russell Kinga2227122010-03-25 18:56:05 +00001360
Marek Szyprowskid4398df2011-12-29 13:09:51 +01001361 if (end > arm_lowmem_limit)
1362 end = arm_lowmem_limit;
Russell King8df65162010-10-27 19:57:38 +01001363 if (start >= end)
1364 break;
1365
1366 map.pfn = __phys_to_pfn(start);
1367 map.virtual = __phys_to_virt(start);
Jin Hongada9e122011-07-19 12:44:39 -07001368#ifdef CONFIG_STRICT_MEMORY_RWX
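		/*
		 * Split the bank containing the kernel image so each region
		 * gets appropriate permissions: the first section of the bank
		 * and the init text remain MT_MEMORY, the kernel text up to
		 * RX_AREA_END becomes MT_MEMORY_RX, the read-only data becomes
		 * MT_MEMORY_R, and everything from __init_data to the end of
		 * the bank becomes MT_MEMORY_RW.
		 */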
1369 if (start <= __pa(_text) && __pa(_text) < end) {
Steve Mucklef132c6c2012-06-06 18:30:57 -07001370 map.length = SECTION_SIZE;
Jin Hongada9e122011-07-19 12:44:39 -07001371 map.type = MT_MEMORY;
1372
Steve Mucklef132c6c2012-06-06 18:30:57 -07001373 create_mapping(&map, false);
Jin Hongada9e122011-07-19 12:44:39 -07001374
Steve Mucklef132c6c2012-06-06 18:30:57 -07001375 map.pfn = __phys_to_pfn(start + SECTION_SIZE);
1376 map.virtual = __phys_to_virt(start + SECTION_SIZE);
1377 map.length = (unsigned long)RX_AREA_END - map.virtual;
Jin Hongada9e122011-07-19 12:44:39 -07001378 map.type = MT_MEMORY_RX;
1379
Steve Mucklef132c6c2012-06-06 18:30:57 -07001380 create_mapping(&map, false);
Jin Hongada9e122011-07-19 12:44:39 -07001381
1382 map.pfn = __phys_to_pfn(__pa(__start_rodata));
1383 map.virtual = (unsigned long)__start_rodata;
Steve Mucklef132c6c2012-06-06 18:30:57 -07001384 map.length = __init_begin - __start_rodata;
Jin Hongada9e122011-07-19 12:44:39 -07001385 map.type = MT_MEMORY_R;
1386
Steve Mucklef132c6c2012-06-06 18:30:57 -07001387 create_mapping(&map, false);
Jin Hongada9e122011-07-19 12:44:39 -07001388
Steve Mucklef132c6c2012-06-06 18:30:57 -07001389 map.pfn = __phys_to_pfn(__pa(__init_begin));
1390 map.virtual = (unsigned long)__init_begin;
1391 map.length = __init_data - __init_begin;
1392 map.type = MT_MEMORY;
1393
1394 create_mapping(&map, false);
1395
1396 map.pfn = __phys_to_pfn(__pa(__init_data));
1397 map.virtual = (unsigned long)__init_data;
1398 map.length = __phys_to_virt(end) - (unsigned int)__init_data;
Jin Hongada9e122011-07-19 12:44:39 -07001399 map.type = MT_MEMORY_RW;
1400 } else {
1401 map.length = end - start;
1402 map.type = MT_MEMORY_RW;
1403 }
1404#else
Russell King8df65162010-10-27 19:57:38 +01001405 map.length = end - start;
1406 map.type = MT_MEMORY;
Jin Hongada9e122011-07-19 12:44:39 -07001407#endif
Russell King8df65162010-10-27 19:57:38 +01001408
Colin Crosse5e483d2011-08-11 17:15:24 -07001409 create_mapping(&map, false);
Russell Kinga2227122010-03-25 18:56:05 +00001410 }
Colin Crosse5e483d2011-08-11 17:15:24 -07001411
1412#ifdef CONFIG_DEBUG_RODATA
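	/*
	 * Remap the PMD-aligned region covering the kernel text and rodata.
	 * The final "true" argument to create_mapping() is assumed to force
	 * page-granular (non-section) mappings so that CONFIG_DEBUG_RODATA
	 * can later change the permissions of individual pages.
	 */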
1413 start = __pa(_stext) & PMD_MASK;
1414 end = ALIGN(__pa(__end_rodata), PMD_SIZE);
1415
1416 map.pfn = __phys_to_pfn(start);
1417 map.virtual = __phys_to_virt(start);
1418 map.length = end - start;
1419 map.type = MT_MEMORY;
1420
1421 create_mapping(&map, true);
1422#endif
Russell Kinga2227122010-03-25 18:56:05 +00001423}
1424
Russell Kingd111e8f2006-09-27 15:27:33 +01001425/*
1426 * paging_init() sets up the page tables, initialises the zone memory
1427 * maps, and sets up the zero page, bad page and bad page tables.
1428 */
Nicolas Pitre4b5f32c2008-10-06 13:24:40 -04001429void __init paging_init(struct machine_desc *mdesc)
Russell Kingd111e8f2006-09-27 15:27:33 +01001430{
1431 void *zero_page;
1432
Marek Szyprowskid4398df2011-12-29 13:09:51 +01001433 memblock_set_current_limit(arm_lowmem_limit);
Russell King0371d3f2011-07-05 19:58:29 +01001434
Russell Kingd111e8f2006-09-27 15:27:33 +01001435 build_mem_type_table();
Nicolas Pitre4b5f32c2008-10-06 13:24:40 -04001436 prepare_page_table();
Russell Kinga2227122010-03-25 18:56:05 +00001437 map_lowmem();
Marek Szyprowskid4398df2011-12-29 13:09:51 +01001438 dma_contiguous_remap();
Russell Kingd111e8f2006-09-27 15:27:33 +01001439 devicemaps_init(mdesc);
Nicolas Pitred73cd422008-09-15 16:44:55 -04001440 kmap_init();
Russell Kingd111e8f2006-09-27 15:27:33 +01001441
1442 top_pmd = pmd_off_k(0xffff0000);
1443
Russell King3abe9d32010-03-25 17:02:59 +00001444 /* allocate the zero page. */
1445 zero_page = early_alloc(PAGE_SIZE);
Russell King2778f622010-07-09 16:27:52 +01001446
Russell King8d717a52010-05-22 19:47:18 +01001447 bootmem_init();
Russell King2778f622010-07-09 16:27:52 +01001448
Russell Kingd111e8f2006-09-27 15:27:33 +01001449 empty_zero_page = virt_to_page(zero_page);
Russell King421fe932009-10-25 10:23:04 +00001450 __flush_dcache_page(NULL, empty_zero_page);
Russell Kingd111e8f2006-09-27 15:27:33 +01001451}