/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/mmu_writeable.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <asm/user_accessible_timer.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

#define RX_AREA_START		_text
#define RX_AREA_END		__start_rodata

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

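/*
 * "noalign" on the command line permanently clears the alignment fault
 * enable bit (CR_A) in the CPU control register.
 */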
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

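/*
 * Update the bits selected by 'mask' in the control register to the value
 * 'set', keeping the cr_alignment/cr_no_alignment shadow copies in sync.
 * Only built on UP, where a single shadowed control register is sufficient.
 */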
#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_R] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RX] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_DEVICE_USER_ACCESSIBLE] = {
		.prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				L_PTE_SHARED | L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
		.domain    = DOMAIN_IO,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

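/*
 * PTE_SET_FN/SET_MEMORY_FN generate the set_memory_ro/rw/x/nx helpers:
 * each one walks a page-aligned range in the module area with
 * apply_to_page_range(), rewrites the PTEs with the given transform, and
 * then flushes the kernel TLB for the range.
 */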
#define PTE_SET_FN(_name, pteop) \
static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
			void *data) \
{ \
	pte_t pte = pteop(*ptep); \
\
	set_pte_ext(ptep, pte, 0); \
	return 0; \
} \

#define SET_MEMORY_FN(_name, callback) \
int set_memory_##_name(unsigned long addr, int numpages) \
{ \
	unsigned long start = addr; \
	unsigned long size = PAGE_SIZE*numpages; \
	unsigned long end = start + size; \
\
	if (start < MODULES_VADDR || start >= MODULES_END) \
		return -EINVAL; \
\
	if (end < MODULES_VADDR || end >= MODULES_END) \
		return -EINVAL; \
\
	apply_to_page_range(&init_mm, start, size, callback, NULL); \
	flush_tlb_kernel_range(start, end); \
	return 0; \
}

PTE_SET_FN(ro, pte_wrprotect)
PTE_SET_FN(rw, pte_mkwrite)
PTE_SET_FN(x, pte_mkexec)
PTE_SET_FN(nx, pte_mknexec)

SET_MEMORY_FN(ro, pte_set_ro)
EXPORT_SYMBOL(set_memory_ro);
SET_MEMORY_FN(rw, pte_set_rw)
EXPORT_SYMBOL(set_memory_rw);
SET_MEMORY_FN(x, pte_set_x)
EXPORT_SYMBOL(set_memory_x);
SET_MEMORY_FN(nx, pte_set_nx)
EXPORT_SYMBOL(set_memory_nx);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * On ARMv5 and lower, bit 4 must be set for page tables (it was the
	 * cache "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

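/*
 * Boot-time allocator: returns zeroed, 'align'-aligned memory taken from
 * memblock, for use before the page allocator is available.
 */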
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

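/*
 * Populate the PTEs for [addr, end) with consecutive pages starting at
 * 'pfn', using the protection bits of the given memory type.
 */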
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init map_init_section(pmd_t *pmd, unsigned long addr,
			unsigned long end, phys_addr_t phys,
			const struct mem_type *type)
{
#ifndef CONFIG_ARM_LPAE
	/*
	 * In classic MMU format, puds and pmds are folded into the pgds.
	 * pmd_offset gives the PGD entry.  PGDs refer to a group of L1
	 * entries making up one logical pointer to an L2 table (2MB),
	 * whereas PMDs refer to the individual L1 entries (1MB).  Hence
	 * increment to get the correct offset for odd 1MB sections.
	 * (See arch/arm/include/asm/pgtable-2level.h)
	 */
	if (addr & SECTION_SIZE)
		pmd++;
#endif
	do {
		*pmd = __pmd(phys | type->prot_sect);
		phys += SECTION_SIZE;
	} while (pmd++, addr += SECTION_SIZE, addr != end);

	flush_pmd_entry(pmd);
}

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		/*
		 * With LPAE, we must loop over to map
		 * all the pmds for the given range.
		 */
		next = pmd_addr_end(addr, end);

		/*
		 * Try a section mapping - addr, next and phys must all be
		 * aligned to a section boundary.
		 */
		if (type->prot_sect &&
				((addr | next | phys) & ~SECTION_MASK) == 0) {
			map_init_section(pmd, addr, next, phys, type);
		} else {
			alloc_init_pte(pmd, addr, next,
						__phys_to_pfn(phys), type);
		}

		phys += next - addr;

	} while (pmd++, addr = next, addr != end);
}

static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
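/*
 * Map a region described by 'md' using 16MB supersections; this is what
 * lets a 32-bit virtual mapping reach a 36-bit physical address on
 * ARMv6+ and XSC3.
 */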
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if ((md->virtual != vectors_base() &&
		md->virtual != get_user_accessible_timers_base()) &&
		md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx cannot "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;
	int rc = 0;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		rc = vm_area_check_early(vm);
		if (!rc)
			vm_area_add_early(vm++);
	}
}

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However, a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	struct vm_struct *vm;

	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
	vm->size = SECTION_SIZE;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = pmd_empty_section_gap;
	vm_area_add_early(vm);
}

static void __init fill_pmd_gaps(void)
{
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
			continue;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

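/*
 * arm_lowmem_limit is the highest physical address covered by the lowmem
 * direct mapping; RAM above it can only be reached through highmem.
 */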
phys_addr_t arm_lowmem_limit __initdata = 0;

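/*
 * Walk meminfo and make the bank layout compatible with the chosen vmalloc
 * area: split or truncate banks that straddle vmalloc_min, mark (or drop)
 * highmem banks, and compute arm_lowmem_limit.
 */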
void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

#ifdef CONFIG_ENABLE_VMALLOC_SAVING
	unsigned long hole_start;
	for (i = 0; i < (meminfo.nr_banks - 1); i++) {
		hole_start = meminfo.bank[i].start + meminfo.bank[i].size;
		if (hole_start != meminfo.bank[i+1].start) {
			if (hole_start <= MAX_HOLE_ADDRESS) {
				vmalloc_min = (void *) (vmalloc_min +
					(meminfo.bank[i+1].start - hole_start));
			}
		}
	}
#endif
#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
	find_memory_hole();
#endif

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area, greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
			arm_lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(arm_lowmem_limit - 1) + 1;
	memblock_set_current_limit(arm_lowmem_limit);
}

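/*
 * Clear out the boot-time page table entries that the mappings created
 * below will replace: everything below the kernel image and everything
 * between the end of the first lowmem block and the vmalloc region.
 */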
static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_START, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
	struct map_desc map;
	unsigned long addr;
	void *vectors;

	/*
	 * Allocate the vector page early.
	 */
	vectors = early_alloc(PAGE_SIZE * 2);

	early_trap_init(vectors);

	for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Map the kernel if it is XIP.
	 * It is always first in the module area.
	 */
1217#ifdef CONFIG_XIP_KERNEL
1218 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
Russell Kingab4f2ee2008-11-06 17:11:07 +00001219 map.virtual = MODULES_VADDR;
Russell King37efe642008-12-01 11:53:07 +00001220 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
Russell Kingd111e8f2006-09-27 15:27:33 +01001221 map.type = MT_ROM;
1222 create_mapping(&map);
1223#endif

	/*
	 * Map the cache flushing regions.
	 */
#ifdef FLUSH_BASE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
	map.virtual = FLUSH_BASE;
	map.length = SZ_1M;
	map.type = MT_CACHECLEAN;
	create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
	map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
	map.virtual = FLUSH_BASE_MINICACHE;
	map.length = SZ_1M;
	map.type = MT_MINICLEAN;
	create_mapping(&map);
#endif

	/*
	 * Create a mapping for the machine vectors at the high-vectors
	 * location (0xffff0000).  If we aren't using high-vectors, also
	 * create a mapping at the low-vectors virtual address.
	 */
	map.pfn = __phys_to_pfn(virt_to_phys(vectors));
	map.virtual = 0xffff0000;
	map.length = PAGE_SIZE;
#ifdef CONFIG_KUSER_HELPERS
	map.type = MT_HIGH_VECTORS;
#else
	map.type = MT_LOW_VECTORS;
#endif
	create_mapping(&map);

	if (!vectors_high()) {
		map.virtual = 0;
		map.length = PAGE_SIZE * 2;
		map.type = MT_LOW_VECTORS;
		create_mapping(&map);
	}

	/* Now create a kernel read-only mapping */
	map.pfn += 1;
	map.virtual = 0xffff0000 + PAGE_SIZE;
	map.length = PAGE_SIZE;
	map.type = MT_LOW_VECTORS;
	create_mapping(&map);

	/*
	 * Ask the machine support to map in the statically mapped devices.
	 */
	if (mdesc->map_io)
		mdesc->map_io();
	fill_pmd_gaps();

	if (use_user_accessible_timers()) {
		/*
		 * Generate a mapping for the timer page.
		 */
		int page_addr = get_timer_page_address();
		if (page_addr != ARM_USER_ACCESSIBLE_TIMERS_INVALID_PAGE) {
			map.pfn = __phys_to_pfn(page_addr);
			map.virtual = CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE;
			map.length = PAGE_SIZE;
			map.type = MT_DEVICE_USER_ACCESSIBLE;
			create_mapping(&map);
		}
	}

	/*
	 * Finally flush the caches and tlb to ensure that we're in a
	 * consistent state wrt the writebuffer.  This also ensures that
	 * any write-allocated cache lines in the vector page are written
	 * back.  After this point, we can start to touch devices again.
	 */
	local_flush_tlb_all();
	flush_cache_all();
}
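/*
 * A sketch (hypothetical addresses, illustration only) of the machine
 * ->map_io hook invoked above; boards typically hand a static map_desc
 * table to iotable_init(), which routes it through create_mapping():
 */
#if 0	/* example only, never built */
static struct map_desc example_io_desc[] __initdata = {
	{
		.virtual	= 0xf8000000,			/* made up */
		.pfn		= __phys_to_pfn(0x10000000),	/* made up */
		.length		= SZ_1M,
		.type		= MT_DEVICE,
	},
};

static void __init example_map_io(void)
{
	iotable_init(example_io_desc, ARRAY_SIZE(example_io_desc));
}
#endif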

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
	pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
		PKMAP_BASE, _PAGE_KERNEL_TABLE);
#endif
}
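/*
 * With pkmap_page_table in place, highmem pages can be mapped on demand;
 * a typical (illustrative) consumer looks like:
 *
 *	void *p = kmap_atomic(page);
 *	clear_page(p);
 *	kunmap_atomic(p);
 */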

#ifdef CONFIG_STRICT_MEMORY_RWX
static struct {
	pmd_t *pmd_to_flush;
	pmd_t *pmd;
	unsigned long addr;
	pmd_t saved_pmd;
	bool made_writeable;
} mem_unprotect;

static DEFINE_SPINLOCK(mem_text_writeable_lock);

void mem_text_writeable_spinlock(unsigned long *flags)
{
	spin_lock_irqsave(&mem_text_writeable_lock, *flags);
}

void mem_text_writeable_spinunlock(unsigned long *flags)
{
	spin_unlock_irqrestore(&mem_text_writeable_lock, *flags);
}

/*
 * mem_text_address_writeable() and mem_text_address_restore() must be
 * called as a pair.  They temporarily make the specified address in the
 * kernel text section writeable when it has been marked read-only by
 * STRICT_MEMORY_RWX, and are used by kprobes and other debugging tools
 * to set breakpoints etc.  mem_text_address_writeable() is invoked
 * before the write; afterwards, mem_text_address_restore() must be
 * called to restore the original state.  This is only effective on the
 * kernel text section mapped MT_MEMORY_RX by map_lowmem().
 *
 * Both must be called with mem_text_writeable_lock held by the caller,
 * with no unlocking between the calls; the caller should release the
 * lock immediately after mem_text_address_restore() returns.  Only the
 * write and its associated cache operations should be performed between
 * the calls, as in the sketch below.
 */
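/*
 * The calling sequence described above, in sketch form (the in-tree
 * mem_text_write_kernel_word() further down is the canonical instance):
 *
 *	unsigned long flags;
 *
 *	mem_text_writeable_spinlock(&flags);
 *	mem_text_address_writeable((unsigned long)addr);
 *	*addr = new_value;
 *	flush_icache_range((unsigned long)addr,
 *			   (unsigned long)addr + sizeof(long));
 *	mem_text_address_restore();
 *	mem_text_writeable_spinunlock(&flags);
 */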

/* this function must be called with mem_text_writeable_lock held */
void mem_text_address_writeable(unsigned long addr)
{
	struct task_struct *tsk = current;
	struct mm_struct *mm = tsk->active_mm;
	pgd_t *pgd = pgd_offset(mm, addr);
	pud_t *pud = pud_offset(pgd, addr);

	mem_unprotect.made_writeable = 0;

	if ((addr < (unsigned long)RX_AREA_START) ||
	    (addr >= (unsigned long)RX_AREA_END))
		return;

	mem_unprotect.pmd = pmd_offset(pud, addr);
	mem_unprotect.pmd_to_flush = mem_unprotect.pmd;
	mem_unprotect.addr = addr & PAGE_MASK;

	/* a pgd entry covers two 1MB sections; step to the second half */
	if (addr & SECTION_SIZE)
		mem_unprotect.pmd++;

	mem_unprotect.saved_pmd = *mem_unprotect.pmd;
	if ((mem_unprotect.saved_pmd & PMD_TYPE_MASK) != PMD_TYPE_SECT)
		return;

	*mem_unprotect.pmd &= ~PMD_SECT_APX;

	flush_pmd_entry(mem_unprotect.pmd_to_flush);
	flush_tlb_kernel_page(mem_unprotect.addr);
	mem_unprotect.made_writeable = 1;
}

/* this function must be called with mem_text_writeable_lock held */
void mem_text_address_restore(void)
{
	if (mem_unprotect.made_writeable) {
		*mem_unprotect.pmd = mem_unprotect.saved_pmd;
		flush_pmd_entry(mem_unprotect.pmd_to_flush);
		flush_tlb_kernel_page(mem_unprotect.addr);
	}
}
#endif

void mem_text_write_kernel_word(unsigned long *addr, unsigned long word)
{
	unsigned long flags;

	mem_text_writeable_spinlock(&flags);
	mem_text_address_writeable((unsigned long)addr);
	*addr = word;
	flush_icache_range((unsigned long)addr,
			   ((unsigned long)addr + sizeof(long)));
	mem_text_address_restore();
	mem_text_writeable_spinunlock(&flags);
}
EXPORT_SYMBOL(mem_text_write_kernel_word);
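/*
 * Hypothetical caller (illustration only, not part of this file): a
 * breakpoint planter would patch one text word like this; the ARM
 * "BKPT #0" opcode value is illustrative:
 */
#if 0	/* example only, never built */
static void example_plant_breakpoint(unsigned long *insn)
{
	mem_text_write_kernel_word(insn, 0xe1200070);	/* BKPT #0 */
}
#endif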

static void __init map_lowmem(void)
{
	struct memblock_region *reg;
	struct vm_struct *vm;
	phys_addr_t start;
	phys_addr_t end;
	unsigned long vaddr;
	unsigned long pfn;
	unsigned long length;
	unsigned int type;
	int nr = 0;

	/* Map all the lowmem memory banks. */
	for_each_memblock(memory, reg) {
		struct map_desc map;
		nr++;
		start = reg->base;
		end = start + reg->size;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		map.pfn = __phys_to_pfn(start);
		map.virtual = __phys_to_virt(start);
#ifdef CONFIG_STRICT_MEMORY_RWX
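		/*
		 * The bank holding the kernel image is carved into five
		 * mappings (boundaries come from the linker script):
		 *
		 *	[start, start + 1M)		   MT_MEMORY_RW
		 *	[start + 1M, __start_rodata)	   MT_MEMORY_RX  kernel text
		 *	[__start_rodata, __init_begin)	   MT_MEMORY_R   rodata
		 *	[__init_begin, __arch_info_begin)  MT_MEMORY_RX  init text
		 *	[__arch_info_begin, end)	   MT_MEMORY_RW  rest of bank
		 */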
		if (start <= __pa(_text) && __pa(_text) < end) {
			map.length = SECTION_SIZE;
			map.type = MT_MEMORY_RW;

			create_mapping(&map);

			map.pfn = __phys_to_pfn(start + SECTION_SIZE);
			map.virtual = __phys_to_virt(start + SECTION_SIZE);
			map.length = (unsigned long)RX_AREA_END - map.virtual;
			map.type = MT_MEMORY_RX;

			create_mapping(&map);

			map.pfn = __phys_to_pfn(__pa(__start_rodata));
			map.virtual = (unsigned long)__start_rodata;
			map.length = __init_begin - __start_rodata;
			map.type = MT_MEMORY_R;

			create_mapping(&map);

			map.pfn = __phys_to_pfn(__pa(__init_begin));
			map.virtual = (unsigned long)__init_begin;
			map.length = (char *)__arch_info_begin - __init_begin;
			map.type = MT_MEMORY_RX;

			create_mapping(&map);

			map.pfn = __phys_to_pfn(__pa(__arch_info_begin));
			map.virtual = (unsigned long)__arch_info_begin;
			map.length = __phys_to_virt(end) -
				(unsigned long)__arch_info_begin;
			map.type = MT_MEMORY_RW;
		} else {
			map.length = end - start;
			map.type = MT_MEMORY_RW;
		}
#else
		map.length = end - start;
		map.type = MT_MEMORY;
#endif

		create_mapping(&map);
	}

	/*
	 * Register each lowmem mapping as an early vm_struct so the
	 * vmalloc allocator knows these virtual ranges are taken.
	 */
	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for_each_memblock(memory, reg) {
		start = reg->base;
		end = start + reg->size;

		if (end > arm_lowmem_limit)
			end = arm_lowmem_limit;
		if (start >= end)
			break;

		pfn = __phys_to_pfn(start);
		vaddr = __phys_to_virt(start);
		length = end - start;
		type = MT_MEMORY;

		vm->addr = (void *)(vaddr & PAGE_MASK);
		vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(pfn);
		vm->flags = VM_LOWMEM | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(type);
		vm->caller = map_lowmem;
		vm_area_add_early(vm);
		mark_vmalloc_reserved_area(vm->addr, vm->size);
		vm++;
	}
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
	void *zero_page;

	memblock_set_current_limit(arm_lowmem_limit);

	build_mem_type_table();
	prepare_page_table();
	map_lowmem();
	dma_contiguous_remap();
	devicemaps_init(mdesc);
	kmap_init();

	top_pmd = pmd_off_k(0xffff0000);

	/* allocate the zero page. */
	zero_page = early_alloc(PAGE_SIZE);

	bootmem_init();

	empty_zero_page = virt_to_page(zero_page);
	__flush_dcache_page(NULL, empty_zero_page);
}
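/*
 * A minimal sketch (hypothetical board, illustration only) of the
 * machine descriptor whose hooks paging_init() and the code above
 * consume:
 *
 *	MACHINE_START(EXAMPLE, "Example board")
 *		.map_io		= example_map_io,
 *		.reserve	= example_board_reserve,
 *	MACHINE_END
 */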