/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/mman.h>
#include <linux/nodemask.h>
#include <linux/memblock.h>
#include <linux/fs.h>
#include <linux/vmalloc.h>

#include <asm/cp15.h>
#include <asm/cputype.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/smp_plat.h>
#include <asm/tlb.h>
#include <asm/highmem.h>
#include <asm/system_info.h>
#include <asm/traps.h>
#include <asm/mmu_writeable.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include <asm/user_accessible_timer.h>

#include "mm.h"

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

#define RX_AREA_START		_text
#define RX_AREA_END		__start_rodata

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	pmdval_t	pmd;
	pteval_t	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= L_PTE_MT_UNCACHED,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= L_PTE_MT_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= L_PTE_MT_WRITETHROUGH,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= L_PTE_MT_WRITEBACK,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= L_PTE_MT_WRITEALLOC,
	}
};

/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache or the cache and writebuffer to be turned off.  (Note: the
 * write buffer should not be on with the cache off).
 */
static int __init early_cachepolicy(char *p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	/*
	 * This restriction is partly to do with the way we boot; it is
	 * unpredictable to have memory mapped using two different sets of
	 * memory attributes (shared, type, and cache attribs).  We cannot
	 * change these attributes once the initial assembly has set up the
	 * page tables.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6) {
		printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
		cachepolicy = CPOLICY_WRITEBACK;
	}
	flush_cache_all();
	set_cr(cr_alignment);
	return 0;
}
early_param("cachepolicy", early_cachepolicy);

static int __init early_nocache(char *__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nocache", early_nocache);

static int __init early_nowrite(char *__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(p);
	return 0;
}
early_param("nowb", early_nowrite);

#ifndef CONFIG_ARM_LPAE
static int __init early_ecc(char *p)
{
	if (memcmp(p, "on", 2) == 0)
		ecc_mask = PMD_PROTECTION;
	else if (memcmp(p, "off", 3) == 0)
		ecc_mask = 0;
	return 0;
}
early_param("ecc", early_ecc);
#endif

static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
	unsigned long flags;

	mask &= ~CR_A;

	set &= mask;

	local_irq_save(flags);

	cr_no_alignment = (cr_no_alignment & ~mask) | set;
	cr_alignment = (cr_alignment & ~mask) | set;

	set_cr((get_cr() & ~mask) | set);

	local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE		L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_XN
#define PROT_SECT_DEVICE	PMD_TYPE_SECT|PMD_SECT_AP_WRITE

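/*
 * Table of memory type attributes.  Each entry supplies the L1/L2
 * protection bits and the domain used when create_mapping() or the
 * ioremap() code builds a mapping of that type.
 */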
static struct mem_type mem_types[] = {
	[MT_DEVICE] = {		  /* Strongly ordered / ARMv6 shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				  L_PTE_SHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_S,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_CACHED] = {	  /* ioremap_cached */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE | PMD_SECT_WB,
		.domain		= DOMAIN_IO,
	},
	[MT_DEVICE_WC] = {	/* ioremap_wc */
		.prot_pte	= PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PROT_SECT_DEVICE,
		.domain		= DOMAIN_IO,
	},
	[MT_UNCACHED] = {
		.prot_pte	= PROT_PTE_DEVICE,
		.prot_l1	= PMD_TYPE_TABLE,
		.prot_sect	= PMD_TYPE_SECT | PMD_SECT_XN,
		.domain		= DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
#ifndef CONFIG_ARM_LPAE
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
#endif
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_R] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RW] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_RX] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_NONCACHED] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_BUFFERABLE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DTCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_ITCM] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_SO] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_MT_UNCACHED,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE | PMD_SECT_S |
				PMD_SECT_UNCACHED | PMD_SECT_XN,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MEMORY_DMA_READY] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_XN,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_DEVICE_USER_ACCESSIBLE] = {
		.prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
				L_PTE_SHARED | L_PTE_USER | L_PTE_RDONLY,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
		.domain    = DOMAIN_IO,
	},
};

const struct mem_type *get_mem_type(unsigned int type)
{
	return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
EXPORT_SYMBOL(get_mem_type);

#define PTE_SET_FN(_name, pteop) \
static int pte_set_##_name(pte_t *ptep, pgtable_t token, unsigned long addr, \
			void *data) \
{ \
	pte_t pte = pteop(*ptep); \
\
	set_pte_ext(ptep, pte, 0); \
	return 0; \
} \

#define SET_MEMORY_FN(_name, callback) \
int set_memory_##_name(unsigned long addr, int numpages) \
{ \
	unsigned long start = addr; \
	unsigned long size = PAGE_SIZE*numpages; \
	unsigned end = start + size; \
\
	if (!IS_ENABLED(CONFIG_FORCE_PAGES)) { \
		if (start < MODULES_VADDR || start >= MODULES_END) \
			return -EINVAL; \
\
		if (end < MODULES_VADDR || end >= MODULES_END) \
			return -EINVAL; \
	} \
\
	apply_to_page_range(&init_mm, start, size, callback, NULL); \
	flush_tlb_kernel_range(start, end); \
	return 0; \
}

PTE_SET_FN(ro, pte_wrprotect)
PTE_SET_FN(rw, pte_mkwrite)
PTE_SET_FN(x, pte_mkexec)
PTE_SET_FN(nx, pte_mknexec)

SET_MEMORY_FN(ro, pte_set_ro)
EXPORT_SYMBOL(set_memory_ro);
SET_MEMORY_FN(rw, pte_set_rw)
EXPORT_SYMBOL(set_memory_rw);
SET_MEMORY_FN(x, pte_set_x)
EXPORT_SYMBOL(set_memory_x);
SET_MEMORY_FN(nx, pte_set_nx)
EXPORT_SYMBOL(set_memory_nx);

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	pteval_t user_pgprot, kern_pgprot, vecs_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

	if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
		if (cachepolicy > CPOLICY_BUFFERED)
			cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
		if (cachepolicy > CPOLICY_WRITETHROUGH)
			cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	}
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}
	if (is_smp())
		cachepolicy = CPOLICY_WRITEALLOC;

	/*
	 * Strip out features not present on earlier architectures.
	 * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
	 * without extended page tables don't have the 'Shared' bit.
	 */
	if (cpu_arch < CPU_ARCH_ARMv5)
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
	if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
		for (i = 0; i < ARRAY_SIZE(mem_types); i++)
			mem_types[i].prot_sect &= ~PMD_SECT_S;

	/*
	 * ARMv5 and lower, bit 4 must be set for page tables (was: cache
	 * "update-able on write" bit on ARM610).  However, Xscale and
	 * Xscale3 require this bit to be cleared.
	 */
	if (cpu_is_xscale() || cpu_is_xsc3()) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			mem_types[i].prot_sect &= ~PMD_BIT4;
			mem_types[i].prot_l1 &= ~PMD_BIT4;
		}
	} else if (cpu_arch < CPU_ARCH_ARMv6) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	/*
	 * Mark the device areas according to the CPU/architecture.
	 */
	if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
		if (!cpu_is_xsc3()) {
			/*
			 * Mark device regions on ARMv6+ as execute-never
			 * to prevent speculative instruction fetches.
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
		}
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/*
			 * For ARMv7 with TEX remapping,
			 * - shared device is SXCB=1100
			 * - nonshared device is SXCB=0100
			 * - write combine device mem is SXCB=0001
			 * (Uncached Normal memory)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
		} else if (cpu_is_xsc3()) {
			/*
			 * For Xscale3,
			 * - shared device is TEXCB=00101
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Inner/Outer Uncacheable in xsc3 parlance)
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		} else {
			/*
			 * For ARMv6 and ARMv7 without TEX remapping,
			 * - shared device is TEXCB=00001
			 * - nonshared device is TEXCB=01000
			 * - write combine device mem is TEXCB=00100
			 * (Uncached Normal in ARMv6 parlance).
			 */
			mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
			mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
		}
	} else {
		/*
		 * On others, write combining is "Uncached/Buffered"
		 */
		mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
	}

	/*
	 * Now deal with the memory-type mappings
	 */
	cp = &cache_policies[cachepolicy];
	vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

	/*
	 * Only use write-through for non-SMP systems
	 */
	if (!is_smp() && cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
		vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;

	/*
	 * Enable CPU-specific coherency if supported.
	 * (Only available on XSC3 at the moment.)
	 */
	if (arch_is_coherent() && cpu_is_xsc3()) {
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
		mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
	}
	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
#ifndef CONFIG_ARM_LPAE
		/*
		 * Mark cache clean areas and XIP ROM read only
		 * from SVC mode and no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
#endif

		if (is_smp()) {
			/*
			 * Mark memory with the "shared" attribute
			 * for SMP systems
			 */
			user_pgprot |= L_PTE_SHARED;
			kern_pgprot |= L_PTE_SHARED;
			vecs_pgprot |= L_PTE_SHARED;
			mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_WC].prot_pte |= L_PTE_SHARED;
			mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_DEVICE_CACHED].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_DMA_READY].prot_pte |= L_PTE_SHARED;
			mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_R].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RW].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_RX].prot_sect |= PMD_SECT_S;
			mem_types[MT_MEMORY_NONCACHED].prot_pte |= L_PTE_SHARED;
		}
	}

	/*
	 * Non-cacheable Normal - intended for memory areas that must
	 * not cause dirty cache line writebacks when used
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6) {
		if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
			/* Non-cacheable Normal is XCB = 001 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_BUFFERED;
		} else {
			/* For both ARMv6 and non-TEX-remapping ARMv7 */
			mem_types[MT_MEMORY_NONCACHED].prot_sect |=
				PMD_SECT_TEX(1);
		}
	} else {
		mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
	}

#ifdef CONFIG_ARM_LPAE
	/*
	 * Do not generate access flag faults for the kernel mappings.
	 */
	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		mem_types[i].prot_pte |= PTE_EXT_AF;
		if (mem_types[i].prot_sect)
			mem_types[i].prot_sect |= PMD_SECT_AF;
	}
	kern_pgprot |= PTE_EXT_AF;
	vecs_pgprot |= PTE_EXT_AF;
#endif

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		protection_map[i] = __pgprot(v | user_pgprot);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

	pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_DMA_READY].prot_pte |= kern_pgprot;
	mem_types[MT_MEMORY_NONCACHED].prot_sect |= ecc_mask;
	mem_types[MT_MEMORY_R].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RW].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_MEMORY_RX].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);

	for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
		struct mem_type *t = &mem_types[i];
		if (t->prot_l1)
			t->prot_l1 |= PMD_DOMAIN(t->domain);
		if (t->prot_sect)
			t->prot_sect |= PMD_DOMAIN(t->domain);
	}
}

#ifdef CONFIG_ARM_DMA_MEM_BUFFERABLE
pgprot_t phys_mem_access_prot(struct file *file, unsigned long pfn,
			      unsigned long size, pgprot_t vma_prot)
{
	if (!pfn_valid(pfn))
		return pgprot_noncached(vma_prot);
	else if (file->f_flags & O_SYNC)
		return pgprot_writecombine(vma_prot);
	return vma_prot;
}
EXPORT_SYMBOL(phys_mem_access_prot);
#endif

#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

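/*
 * Boot-time allocation helpers: memory is taken directly from memblock
 * and cleared, since the normal page allocator is not available yet.
 */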
static void __init *early_alloc_aligned(unsigned long sz, unsigned long align)
{
	void *ptr = __va(memblock_alloc(sz, align));
	memset(ptr, 0, sz);
	return ptr;
}

static void __init *early_alloc(unsigned long sz)
{
	return early_alloc_aligned(sz, sz);
}

static pte_t * __init early_pte_alloc(pmd_t *pmd, unsigned long addr, unsigned long prot)
{
	if (pmd_none(*pmd)) {
		pte_t *pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
		__pmd_populate(pmd, __pa(pte), prot);
	}
	BUG_ON(pmd_bad(*pmd));
	return pte_offset_kernel(pmd, addr);
}

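/*
 * Populate a PTE table for the range [addr, end), allocating the table
 * through early_pte_alloc() if the PMD entry is still empty.
 */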
static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
				  unsigned long end, unsigned long pfn,
				  const struct mem_type *type)
{
	pte_t *pte = early_pte_alloc(pmd, addr, type->prot_l1);
	do {
		set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
		pfn++;
	} while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init map_init_section(pmd_t *pmd, unsigned long addr,
			unsigned long end, phys_addr_t phys,
			const struct mem_type *type)
{
#ifndef CONFIG_ARM_LPAE
	/*
	 * In classic MMU format, puds and pmds are folded in to
	 * the pgds. pmd_offset gives the PGD entry. PGDs refer to a
	 * group of L1 entries making up one logical pointer to
	 * an L2 table (2MB), whereas PMDs refer to the individual
	 * L1 entries (1MB). Hence increment to get the correct
	 * offset for odd 1MB sections.
	 * (See arch/arm/include/asm/pgtable-2level.h)
	 */
	if (addr & SECTION_SIZE)
		pmd++;
#endif
	do {
		*pmd = __pmd(phys | type->prot_sect);
		phys += SECTION_SIZE;
	} while (pmd++, addr += SECTION_SIZE, addr != end);

	flush_pmd_entry(pmd);
}

static void __init alloc_init_pmd(pud_t *pud, unsigned long addr,
				      unsigned long end, phys_addr_t phys,
				      const struct mem_type *type)
{
	pmd_t *pmd = pmd_offset(pud, addr);
	unsigned long next;

	do {
		/*
		 * With LPAE, we must loop over to map
		 * all the pmds for the given range.
		 */
		next = pmd_addr_end(addr, end);

		/*
		 * Try a section mapping - addr, next and phys must all be
		 * aligned to a section boundary.
		 */
		if (type->prot_sect &&
				((addr | next | phys) & ~SECTION_MASK) == 0) {
			map_init_section(pmd, addr, next, phys, type);
		} else {
			alloc_init_pte(pmd, addr, next,
						__phys_to_pfn(phys), type);
		}

		phys += next - addr;

	} while (pmd++, addr = next, addr != end);
}

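/*
 * Walk the (possibly folded) PUD entries covering [addr, end) and hand
 * each sub-range to alloc_init_pmd().
 */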
static void __init alloc_init_pud(pgd_t *pgd, unsigned long addr,
	unsigned long end, unsigned long phys, const struct mem_type *type)
{
	pud_t *pud = pud_offset(pgd, addr);
	unsigned long next;

	do {
		next = pud_addr_end(addr, end);
		alloc_init_pmd(pud, addr, next, phys, type);
		phys += next - addr;
	} while (pud++, addr = next, addr != end);
}

#ifndef CONFIG_ARM_LPAE
static void __init create_36bit_mapping(struct map_desc *md,
					const struct mem_type *type)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	pgd_t *pgd;

	addr = md->virtual;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length);

	if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
		printk(KERN_ERR "MM: CPU does not support supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (type->domain) {
		printk(KERN_ERR "MM: invalid domain in supersection "
		       "mapping for 0x%08llx at 0x%08lx\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
		printk(KERN_ERR "MM: cannot create mapping for 0x%08llx"
		       " at 0x%08lx invalid alignment\n",
		       (long long)__pfn_to_phys((u64)md->pfn), addr);
		return;
	}

	/*
	 * Shift bits [35:32] of address into bits [23:20] of PMD
	 * (See ARMv6 spec).
	 */
	phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		pud_t *pud = pud_offset(pgd, addr);
		pmd_t *pmd = pmd_offset(pud, addr);
		int i;

		for (i = 0; i < 16; i++)
			*pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

		addr += SUPERSECTION_SIZE;
		phys += SUPERSECTION_SIZE;
		pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
	} while (addr != end);
}
#endif	/* !CONFIG_ARM_LPAE */

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
static void __init create_mapping(struct map_desc *md)
{
	unsigned long addr, length, end;
	phys_addr_t phys;
	const struct mem_type *type;
	pgd_t *pgd;

	if ((md->virtual != vectors_base() &&
		md->virtual != get_user_accessible_timers_base()) &&
			md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for 0x%08llx"
		       " at 0x%08lx in user region\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET &&
	    (md->virtual < VMALLOC_START || md->virtual >= VMALLOC_END)) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx"
		       " at 0x%08lx out of vmalloc space\n",
		       (long long)__pfn_to_phys((u64)md->pfn), md->virtual);
	}

	type = &mem_types[md->type];

#ifndef CONFIG_ARM_LPAE
	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		create_36bit_mapping(md, type);
		return;
	}
#endif

	addr = md->virtual & PAGE_MASK;
	phys = __pfn_to_phys(md->pfn);
	length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

	if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
		printk(KERN_WARNING "BUG: map for 0x%08llx at 0x%08lx can not "
		       "be mapped using pages, ignoring.\n",
		       (long long)__pfn_to_phys(md->pfn), addr);
		return;
	}

	pgd = pgd_offset_k(addr);
	end = addr + length;
	do {
		unsigned long next = pgd_addr_end(addr, end);

		alloc_init_pud(pgd, addr, next, phys, type);

		phys += next - addr;
		addr = next;
	} while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	struct map_desc *md;
	struct vm_struct *vm;
	int rc = 0;

	if (!nr)
		return;

	vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));

	for (md = io_desc; nr; md++, nr--) {
		create_mapping(md);
		vm->addr = (void *)(md->virtual & PAGE_MASK);
		vm->size = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));
		vm->phys_addr = __pfn_to_phys(md->pfn);
		vm->flags = VM_IOREMAP | VM_ARM_STATIC_MAPPING;
		vm->flags |= VM_ARM_MTYPE(md->type);
		vm->caller = iotable_init;
		rc = vm_area_check_early(vm);
		if (!rc)
			vm_area_add_early(vm++);
	}
}

#ifndef CONFIG_ARM_LPAE

/*
 * The Linux PMD is made of two consecutive section entries covering 2MB
 * (see definition in include/asm/pgtable-2level.h).  However a call to
 * create_mapping() may optimize static mappings by using individual
 * 1MB section mappings.  This leaves the actual PMD potentially half
 * initialized if the top or bottom section entry isn't used, leaving it
 * open to problems if a subsequent ioremap() or vmalloc() tries to use
 * the virtual space left free by that unused section entry.
 *
 * Let's avoid the issue by inserting dummy vm entries covering the unused
 * PMD halves once the static mappings are in place.
 */

static void __init pmd_empty_section_gap(unsigned long addr)
{
	struct vm_struct *vm;

	vm = early_alloc_aligned(sizeof(*vm), __alignof__(*vm));
	vm->addr = (void *)addr;
	vm->size = SECTION_SIZE;
	vm->flags = VM_IOREMAP | VM_ARM_EMPTY_MAPPING;
	vm->caller = pmd_empty_section_gap;
	vm_area_add_early(vm);
}

static void __init fill_pmd_gaps(void)
{
	struct vm_struct *vm;
	unsigned long addr, next = 0;
	pmd_t *pmd;

	/* we're still single threaded hence no lock needed here */
	for (vm = vmlist; vm; vm = vm->next) {
		if (!(vm->flags & (VM_ARM_STATIC_MAPPING | VM_ARM_EMPTY_MAPPING)))
			continue;
		addr = (unsigned long)vm->addr;
		if (addr < next)
			continue;

		/*
		 * Check if this vm starts on an odd section boundary.
		 * If so and the first section entry for this PMD is free
		 * then we block the corresponding virtual address.
		 */
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr);
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr & PMD_MASK);
		}

		/*
		 * Then check if this vm ends on an odd section boundary.
		 * If so and the second section entry for this PMD is empty
		 * then we block the corresponding virtual address.
		 */
		addr += vm->size;
		if ((addr & ~PMD_MASK) == SECTION_SIZE) {
			pmd = pmd_off_k(addr) + 1;
			if (pmd_none(*pmd))
				pmd_empty_section_gap(addr);
		}

		/* no need to look at any vm entry until we hit the next PMD */
		next = (addr + PMD_SIZE - 1) & PMD_MASK;
	}
}

#else
#define fill_pmd_gaps() do { } while (0)
#endif

static void * __initdata vmalloc_min =
	(void *)(VMALLOC_END - (240 << 20) - VMALLOC_OFFSET);

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 240m.
 */
static int __init early_vmalloc(char *arg)
{
	unsigned long vmalloc_reserve = memparse(arg, NULL);

	if (vmalloc_reserve < SZ_16M) {
		vmalloc_reserve = SZ_16M;
		printk(KERN_WARNING
			"vmalloc area too small, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
		vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
		printk(KERN_WARNING
			"vmalloc area is too big, limiting to %luMB\n",
			vmalloc_reserve >> 20);
	}

	vmalloc_min = (void *)(VMALLOC_END - vmalloc_reserve);
	return 0;
}
early_param("vmalloc", early_vmalloc);

phys_addr_t arm_lowmem_limit __initdata = 0;

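/*
 * Trim and split the memory banks reported by the boot loader so that
 * lowmem never collides with the vmalloc area, and record the resulting
 * lowmem limit in arm_lowmem_limit.
 */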
void __init sanity_check_meminfo(void)
{
	int i, j, highmem = 0;

#ifdef CONFIG_ENABLE_VMALLOC_SAVING
	unsigned long hole_start;
	for (i = 0; i < (meminfo.nr_banks - 1); i++) {
		hole_start = meminfo.bank[i].start + meminfo.bank[i].size;
		if (hole_start != meminfo.bank[i+1].start) {
			if (hole_start <= MAX_HOLE_ADDRESS) {
				vmalloc_min = (void *) (vmalloc_min +
					(meminfo.bank[i+1].start - hole_start));
			}
		}
	}
#endif
#ifdef CONFIG_DONT_MAP_HOLE_AFTER_MEMBANK0
	find_memory_hole();
#endif

	for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
		struct membank *bank = &meminfo.bank[j];
		*bank = meminfo.bank[i];

		if (bank->start > ULONG_MAX)
			highmem = 1;

#ifdef CONFIG_HIGHMEM
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET)
			highmem = 1;

		bank->highmem = highmem;

		/*
		 * Split those memory banks which are partially overlapping
		 * the vmalloc area greatly simplifying things later.
		 */
		if (!highmem && __va(bank->start) < vmalloc_min &&
		    bank->size > vmalloc_min - __va(bank->start)) {
			if (meminfo.nr_banks >= NR_BANKS) {
				printk(KERN_CRIT "NR_BANKS too low, "
						 "ignoring high memory\n");
			} else {
				memmove(bank + 1, bank,
					(meminfo.nr_banks - i) * sizeof(*bank));
				meminfo.nr_banks++;
				i++;
				bank[1].size -= vmalloc_min - __va(bank->start);
				bank[1].start = __pa(vmalloc_min - 1) + 1;
				bank[1].highmem = highmem = 1;
				j++;
			}
			bank->size = vmalloc_min - __va(bank->start);
		}
#else
		bank->highmem = highmem;

		/*
		 * Highmem banks not allowed with !CONFIG_HIGHMEM.
		 */
		if (highmem) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(!CONFIG_HIGHMEM).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would entirely overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start) >= vmalloc_min ||
		    __va(bank->start) < (void *)PAGE_OFFSET) {
			printk(KERN_NOTICE "Ignoring RAM at %.8llx-%.8llx "
			       "(vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1);
			continue;
		}

		/*
		 * Check whether this memory bank would partially overlap
		 * the vmalloc area.
		 */
		if (__va(bank->start + bank->size) > vmalloc_min ||
		    __va(bank->start + bank->size) < __va(bank->start)) {
			unsigned long newsize = vmalloc_min - __va(bank->start);
			printk(KERN_NOTICE "Truncating RAM at %.8llx-%.8llx "
			       "to -%.8llx (vmalloc region overlap).\n",
			       (unsigned long long)bank->start,
			       (unsigned long long)bank->start + bank->size - 1,
			       (unsigned long long)bank->start + newsize - 1);
			bank->size = newsize;
		}
#endif
		if (!bank->highmem && bank->start + bank->size > arm_lowmem_limit)
			arm_lowmem_limit = bank->start + bank->size;

		j++;
	}
#ifdef CONFIG_HIGHMEM
	if (highmem) {
		const char *reason = NULL;

		if (cache_is_vipt_aliasing()) {
			/*
			 * Interactions between kmap and other mappings
			 * make highmem support with aliasing VIPT caches
			 * rather difficult.
			 */
			reason = "with VIPT aliasing cache";
		}
		if (reason) {
			printk(KERN_CRIT "HIGHMEM is not supported %s, ignoring high memory\n",
				reason);
			while (j > 0 && meminfo.bank[j - 1].highmem)
				j--;
		}
	}
#endif
	meminfo.nr_banks = j;
	high_memory = __va(arm_lowmem_limit - 1) + 1;
	memblock_set_current_limit(arm_lowmem_limit);
}

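/*
 * Clear out the boot-time mappings below the kernel image and the kernel
 * space mappings above the first block of lowmem, before the real page
 * tables are built.
 */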
static inline void prepare_page_table(void)
{
	unsigned long addr;
	phys_addr_t end;

	/*
	 * Clear out all the mappings below the kernel image.
	 */
	for (addr = 0; addr < MODULES_VADDR; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
	/* The XIP kernel is mapped in the module area -- skip over it */
	addr = ((unsigned long)_etext + PMD_SIZE - 1) & PMD_MASK;
#endif
	for ( ; addr < PAGE_OFFSET; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));

	/*
	 * Find the end of the first block of lowmem.
	 */
	end = memblock.memory.regions[0].base + memblock.memory.regions[0].size;
	if (end >= arm_lowmem_limit)
		end = arm_lowmem_limit;

	/*
	 * Clear out all the kernel space mappings, except for the first
	 * memory bank, up to the vmalloc region.
	 */
	for (addr = __phys_to_virt(end);
	     addr < VMALLOC_START; addr += PMD_SIZE)
		pmd_clear(pmd_off_k(addr));
}

#ifdef CONFIG_ARM_LPAE
/* the first page is reserved for pgd */
#define SWAPPER_PG_DIR_SIZE	(PAGE_SIZE + \
				 PTRS_PER_PGD * PTRS_PER_PMD * sizeof(pmd_t))
#else
#define SWAPPER_PG_DIR_SIZE	(PTRS_PER_PGD * sizeof(pgd_t))
#endif

/*
 * Reserve the special regions of memory
 */
void __init arm_mm_memblock_reserve(void)
{
	/*
	 * Reserve the page tables.  These are already in use,
	 * and can only be in node 0.
	 */
	memblock_reserve(__pa(swapper_pg_dir), SWAPPER_PG_DIR_SIZE);

#ifdef CONFIG_SA1111
	/*
	 * Because of the SA1111 DMA bug, we want to preserve our
	 * precious DMA-able memory...
	 */
	memblock_reserve(PHYS_OFFSET, __pa(swapper_pg_dir) - PHYS_OFFSET);
#endif
}

1193/*
Nicolas Pitre0536bdf2011-08-25 00:35:59 -04001194 * Set up the device mappings. Since we clear out the page tables for all
1195 * mappings above VMALLOC_START, we will remove any debug device mappings.
Russell Kingd111e8f2006-09-27 15:27:33 +01001196 * This means you have to be careful how you debug this function, or any
1197 * called function. This means you can't use any function or debugging
1198 * method which may touch any device, otherwise the kernel _will_ crash.
1199 */
1200static void __init devicemaps_init(struct machine_desc *mdesc)
1201{
1202 struct map_desc map;
1203 unsigned long addr;
Russell King94e5a852012-01-18 15:32:49 +00001204 void *vectors;
Russell Kingd111e8f2006-09-27 15:27:33 +01001205
1206 /*
1207 * Allocate the vector page early.
1208 */
Russell Kinge76dd7c2013-07-04 11:40:32 +01001209 vectors = early_alloc(PAGE_SIZE * 2);
Russell King94e5a852012-01-18 15:32:49 +00001210
1211 early_trap_init(vectors);
Russell Kingd111e8f2006-09-27 15:27:33 +01001212
Nicolas Pitre0536bdf2011-08-25 00:35:59 -04001213 for (addr = VMALLOC_START; addr; addr += PMD_SIZE)
Russell Kingd111e8f2006-09-27 15:27:33 +01001214 pmd_clear(pmd_off_k(addr));
1215
1216 /*
1217 * Map the kernel if it is XIP.
1218 * It is always first in the modulearea.
1219 */
1220#ifdef CONFIG_XIP_KERNEL
1221 map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
Russell Kingab4f2ee2008-11-06 17:11:07 +00001222 map.virtual = MODULES_VADDR;
Russell King37efe642008-12-01 11:53:07 +00001223 map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
Russell Kingd111e8f2006-09-27 15:27:33 +01001224 map.type = MT_ROM;
1225 create_mapping(&map);
1226#endif
1227
1228 /*
1229 * Map the cache flushing regions.
1230 */
1231#ifdef FLUSH_BASE
1232 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
1233 map.virtual = FLUSH_BASE;
1234 map.length = SZ_1M;
1235 map.type = MT_CACHECLEAN;
1236 create_mapping(&map);
1237#endif
1238#ifdef FLUSH_BASE_MINICACHE
1239 map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
1240 map.virtual = FLUSH_BASE_MINICACHE;
1241 map.length = SZ_1M;
1242 map.type = MT_MINICLEAN;
1243 create_mapping(&map);
1244#endif
1245
1246 /*
1247 * Create a mapping for the machine vectors at the high-vectors
1248 * location (0xffff0000). If we aren't using high-vectors, also
1249 * create a mapping at the low-vectors virtual address.
1250 */
Russell King94e5a852012-01-18 15:32:49 +00001251 map.pfn = __phys_to_pfn(virt_to_phys(vectors));
Russell Kingd111e8f2006-09-27 15:27:33 +01001252 map.virtual = 0xffff0000;
1253 map.length = PAGE_SIZE;
Russell Kingfdee3722013-07-31 21:58:56 +01001254#ifdef CONFIG_KUSER_HELPERS
Russell Kingd111e8f2006-09-27 15:27:33 +01001255 map.type = MT_HIGH_VECTORS;
Russell Kingfdee3722013-07-31 21:58:56 +01001256#else
1257 map.type = MT_LOW_VECTORS;
1258#endif
Laura Abbotta367aec2013-02-27 15:05:34 -08001259 create_mapping(&map);
Russell Kingd111e8f2006-09-27 15:27:33 +01001260
1261 if (!vectors_high()) {
1262 map.virtual = 0;
Russell Kinge76dd7c2013-07-04 11:40:32 +01001263 map.length = PAGE_SIZE * 2;
Russell Kingd111e8f2006-09-27 15:27:33 +01001264 map.type = MT_LOW_VECTORS;
Laura Abbotta367aec2013-02-27 15:05:34 -08001265 create_mapping(&map);
Russell Kingd111e8f2006-09-27 15:27:33 +01001266 }
1267
Russell Kinge76dd7c2013-07-04 11:40:32 +01001268 /* Now create a kernel read-only mapping */
1269 map.pfn += 1;
1270 map.virtual = 0xffff0000 + PAGE_SIZE;
1271 map.length = PAGE_SIZE;
1272 map.type = MT_LOW_VECTORS;
1273 create_mapping(&map);
1274
Russell Kingd111e8f2006-09-27 15:27:33 +01001275 /*
1276 * Ask the machine support to map in the statically mapped devices.
1277 */
1278 if (mdesc->map_io)
1279 mdesc->map_io();
Nicolas Pitreb0884a92012-06-27 17:28:57 +01001280 fill_pmd_gaps();
Russell Kingd111e8f2006-09-27 15:27:33 +01001281
Greg Reidcf105492012-10-12 12:14:12 -04001282 if (use_user_accessible_timers()) {
1283 /*
1284 * Generate a mapping for the timer page.
1285 */
1286 int page_addr = get_timer_page_address();
1287 if (page_addr != ARM_USER_ACCESSIBLE_TIMERS_INVALID_PAGE) {
1288 map.pfn = __phys_to_pfn(page_addr);
1289 map.virtual = CONFIG_ARM_USER_ACCESSIBLE_TIMER_BASE;
1290 map.length = PAGE_SIZE;
1291 map.type = MT_DEVICE_USER_ACCESSIBLE;
Laura Abbotta367aec2013-02-27 15:05:34 -08001292 create_mapping(&map);
Greg Reidcf105492012-10-12 12:14:12 -04001293 }
1294 }
1295
Russell Kingd111e8f2006-09-27 15:27:33 +01001296 /*
1297 * Finally flush the caches and tlb to ensure that we're in a
1298 * consistent state wrt the writebuffer. This also ensures that
1299 * any write-allocated cache lines in the vector page are written
1300 * back. After this point, we can start to touch devices again.
1301 */
1302 local_flush_tlb_all();
1303 flush_cache_all();
1304}
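/*
 * For orientation only: assuming high vectors, the two pages allocated
 * above end up with the first mapped at 0xffff0000 (MT_HIGH_VECTORS when
 * CONFIG_KUSER_HELPERS is enabled, MT_LOW_VECTORS otherwise) and the
 * second, read-only, at 0xffff0000 + PAGE_SIZE for the vector stubs.
 * If vectors_high() is false, both pages are additionally mapped at
 * virtual address 0 as MT_LOW_VECTORS.
 */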
1305
Nicolas Pitred73cd422008-09-15 16:44:55 -04001306static void __init kmap_init(void)
1307{
1308#ifdef CONFIG_HIGHMEM
Laura Abbotta367aec2013-02-27 15:05:34 -08001309 pkmap_page_table = early_pte_alloc(pmd_off_k(PKMAP_BASE),
Russell King4bb2e272010-07-01 18:33:29 +01001310 PKMAP_BASE, _PAGE_KERNEL_TABLE);
Nicolas Pitred73cd422008-09-15 16:44:55 -04001311#endif
1312}
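/*
 * Note, as an assumption about the usual ARM layout rather than anything
 * enforced here: PKMAP_BASE conventionally sits one pmd below PAGE_OFFSET,
 * so the single early-allocated pte table above is enough to back every
 * permanent kmap slot used by highmem.
 */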
1313
Neil Leederf06ab972011-10-25 17:57:26 -04001314#ifdef CONFIG_STRICT_MEMORY_RWX
1315static struct {
1316 pmd_t *pmd_to_flush;
1317 pmd_t *pmd;
1318 unsigned long addr;
1319 pmd_t saved_pmd;
1320 bool made_writeable;
1321} mem_unprotect;
1322
1323static DEFINE_SPINLOCK(mem_text_writeable_lock);
1324
1325void mem_text_writeable_spinlock(unsigned long *flags)
1326{
1327 spin_lock_irqsave(&mem_text_writeable_lock, *flags);
1328}
1329
1330void mem_text_writeable_spinunlock(unsigned long *flags)
1331{
1332 spin_unlock_irqrestore(&mem_text_writeable_lock, *flags);
1333}
1334
1335/*
1336 * mem_text_address_writeable() and mem_text_address_restore()
1337 * should be called as a pair. They are used to make the
1338 * specified address in the kernel text section temporarily writeable
1339 * when it has been marked read-only by STRICT_MEMORY_RWX.
1340 * Used by kprobes and other debugging tools to set breakpoints etc.
1341 * mem_text_address_writeable() is invoked before writing.
1342 * After the write, mem_text_address_restore() must be called
1343 * to restore the original state.
1344 * This is only effective when used on the kernel text section
 1345 * marked as MT_MEMORY_RX by map_lowmem().
1346 *
1347 * They must each be called with mem_text_writeable_lock locked
1348 * by the caller, with no unlocking between the calls.
1349 * The caller should release mem_text_writeable_lock immediately
1350 * after the call to mem_text_address_restore().
1351 * Only the write and associated cache operations should be performed
1352 * between the calls.
1353 */
1354
1355/* this function must be called with mem_text_writeable_lock held */
1356void mem_text_address_writeable(unsigned long addr)
1357{
1358 struct task_struct *tsk = current;
1359 struct mm_struct *mm = tsk->active_mm;
1360 pgd_t *pgd = pgd_offset(mm, addr);
1361 pud_t *pud = pud_offset(pgd, addr);
1362
1363 mem_unprotect.made_writeable = 0;
1364
1365 if ((addr < (unsigned long)RX_AREA_START) ||
1366 (addr >= (unsigned long)RX_AREA_END))
1367 return;
1368
1369 mem_unprotect.pmd = pmd_offset(pud, addr);
1370 mem_unprotect.pmd_to_flush = mem_unprotect.pmd;
1371 mem_unprotect.addr = addr & PAGE_MASK;
1372
1373 if (addr & SECTION_SIZE)
1374 mem_unprotect.pmd++;
1375
1376 mem_unprotect.saved_pmd = *mem_unprotect.pmd;
1377 if ((mem_unprotect.saved_pmd & PMD_TYPE_MASK) != PMD_TYPE_SECT)
1378 return;
1379
1380 *mem_unprotect.pmd &= ~PMD_SECT_APX;
1381
1382 flush_pmd_entry(mem_unprotect.pmd_to_flush);
1383 flush_tlb_kernel_page(mem_unprotect.addr);
1384 mem_unprotect.made_writeable = 1;
1385}
1386
1387/* this function must be called with mem_text_writeable_lock held */
1388void mem_text_address_restore(void)
1389{
1390 if (mem_unprotect.made_writeable) {
1391 *mem_unprotect.pmd = mem_unprotect.saved_pmd;
1392 flush_pmd_entry(mem_unprotect.pmd_to_flush);
1393 flush_tlb_kernel_page(mem_unprotect.addr);
1394 }
1395}
1396#endif
1397
Neil Leeder32942752011-11-07 10:56:46 -05001398void mem_text_write_kernel_word(unsigned long *addr, unsigned long word)
1399{
1400 unsigned long flags;
1401
1402 mem_text_writeable_spinlock(&flags);
1403 mem_text_address_writeable((unsigned long)addr);
1404 *addr = word;
1405 flush_icache_range((unsigned long)addr,
1406 ((unsigned long)addr + sizeof(long)));
1407 mem_text_address_restore();
1408 mem_text_writeable_spinunlock(&flags);
1409}
1410EXPORT_SYMBOL(mem_text_write_kernel_word);
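/*
 * Illustrative sketch only: this is not an interface defined here, just a
 * hypothetical caller showing the intended use of the helper above. A
 * kprobes-style debugger wanting to plant an opcode in the read-only
 * kernel text could do something like the following and let the helper
 * take care of locking, the APX bit and cache maintenance.
 */
static inline void __maybe_unused example_poke_text(unsigned long *addr,
						    unsigned long insn)
{
	/* Single word write into RX-mapped kernel text. */
	mem_text_write_kernel_word(addr, insn);
}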
1411
Russell Kinga2227122010-03-25 18:56:05 +00001412static void __init map_lowmem(void)
1413{
Russell King8df65162010-10-27 19:57:38 +01001414 struct memblock_region *reg;
Neeti Desaic278c942013-06-10 17:14:21 -07001415 struct vm_struct *vm;
1416 phys_addr_t start;
1417 phys_addr_t end;
1418 unsigned long vaddr;
1419 unsigned long pfn;
1420 unsigned long length;
1421 unsigned int type;
1422 int nr = 0;
Russell Kinga2227122010-03-25 18:56:05 +00001423
1424 /* Map all the lowmem memory banks. */
Russell King8df65162010-10-27 19:57:38 +01001425 for_each_memblock(memory, reg) {
Laura Abbotta367aec2013-02-27 15:05:34 -08001426 struct map_desc map;
Neeti Desaic278c942013-06-10 17:14:21 -07001427 nr++;
1428 start = reg->base;
1429 end = start + reg->size;
Russell Kinga2227122010-03-25 18:56:05 +00001430
Marek Szyprowskid4398df2011-12-29 13:09:51 +01001431 if (end > arm_lowmem_limit)
1432 end = arm_lowmem_limit;
Russell King8df65162010-10-27 19:57:38 +01001433 if (start >= end)
1434 break;
1435
1436 map.pfn = __phys_to_pfn(start);
1437 map.virtual = __phys_to_virt(start);
Jin Hongada9e122011-07-19 12:44:39 -07001438#ifdef CONFIG_STRICT_MEMORY_RWX
1439 if (start <= __pa(_text) && __pa(_text) < end) {
Steve Mucklef132c6c2012-06-06 18:30:57 -07001440 map.length = SECTION_SIZE;
Larry Bassel833ef632013-07-29 13:43:17 -07001441 map.type = MT_MEMORY_RW;
Jin Hongada9e122011-07-19 12:44:39 -07001442
Laura Abbotta367aec2013-02-27 15:05:34 -08001443 create_mapping(&map);
Jin Hongada9e122011-07-19 12:44:39 -07001444
Steve Mucklef132c6c2012-06-06 18:30:57 -07001445 map.pfn = __phys_to_pfn(start + SECTION_SIZE);
1446 map.virtual = __phys_to_virt(start + SECTION_SIZE);
1447 map.length = (unsigned long)RX_AREA_END - map.virtual;
Jin Hongada9e122011-07-19 12:44:39 -07001448 map.type = MT_MEMORY_RX;
1449
Laura Abbotta367aec2013-02-27 15:05:34 -08001450 create_mapping(&map);
Jin Hongada9e122011-07-19 12:44:39 -07001451
1452 map.pfn = __phys_to_pfn(__pa(__start_rodata));
1453 map.virtual = (unsigned long)__start_rodata;
Steve Mucklef132c6c2012-06-06 18:30:57 -07001454 map.length = __init_begin - __start_rodata;
Jin Hongada9e122011-07-19 12:44:39 -07001455 map.type = MT_MEMORY_R;
1456
Laura Abbotta367aec2013-02-27 15:05:34 -08001457 create_mapping(&map);
Jin Hongada9e122011-07-19 12:44:39 -07001458
Steve Mucklef132c6c2012-06-06 18:30:57 -07001459 map.pfn = __phys_to_pfn(__pa(__init_begin));
1460 map.virtual = (unsigned long)__init_begin;
Larry Bassel833ef632013-07-29 13:43:17 -07001461 map.length = (char *)__arch_info_begin - __init_begin;
1462 map.type = MT_MEMORY_RX;
Steve Mucklef132c6c2012-06-06 18:30:57 -07001463
Laura Abbotta367aec2013-02-27 15:05:34 -08001464 create_mapping(&map);
Steve Mucklef132c6c2012-06-06 18:30:57 -07001465
Larry Bassel833ef632013-07-29 13:43:17 -07001466 map.pfn = __phys_to_pfn(__pa(__arch_info_begin));
1467 map.virtual = (unsigned long)__arch_info_begin;
1468 map.length = __phys_to_virt(end) -
1469 (unsigned long)__arch_info_begin;
Jin Hongada9e122011-07-19 12:44:39 -07001470 map.type = MT_MEMORY_RW;
1471 } else {
1472 map.length = end - start;
1473 map.type = MT_MEMORY_RW;
1474 }
1475#else
Russell King8df65162010-10-27 19:57:38 +01001476 map.length = end - start;
1477 map.type = MT_MEMORY;
Jin Hongada9e122011-07-19 12:44:39 -07001478#endif
Russell King8df65162010-10-27 19:57:38 +01001479
Laura Abbotta367aec2013-02-27 15:05:34 -08001480 create_mapping(&map);
Russell Kinga2227122010-03-25 18:56:05 +00001481 }
Neeti Desaic278c942013-06-10 17:14:21 -07001482
1483 vm = early_alloc_aligned(sizeof(*vm) * nr, __alignof__(*vm));
1484
1485 for_each_memblock(memory, reg) {
1486
1487 start = reg->base;
1488 end = start + reg->size;
1489
1490 if (end > arm_lowmem_limit)
1491 end = arm_lowmem_limit;
1492 if (start >= end)
1493 break;
1494
1495 pfn = __phys_to_pfn(start);
1496 vaddr = __phys_to_virt(start);
1497 length = end - start;
1498 type = MT_MEMORY;
1499
1500 vm->addr = (void *)(vaddr & PAGE_MASK);
1501 vm->size = PAGE_ALIGN(length + (vaddr & ~PAGE_MASK));
1502 vm->phys_addr = __pfn_to_phys(pfn);
Laura Abbottd3263482013-08-22 13:46:07 -07001503 vm->flags = VM_LOWMEM | VM_ARM_STATIC_MAPPING;
Neeti Desaic278c942013-06-10 17:14:21 -07001504 vm->flags |= VM_ARM_MTYPE(type);
1505 vm->caller = map_lowmem;
Laura Abbottf2da5eb2013-12-20 13:17:19 -08001506 vm_area_add_early(vm);
1507 mark_vmalloc_reserved_area(vm->addr, vm->size);
1508 vm++;
Neeti Desaic278c942013-06-10 17:14:21 -07001509 }
Russell Kinga2227122010-03-25 18:56:05 +00001510}
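/*
 * Rough sketch of what the first loop above produces for the bank holding
 * the kernel when CONFIG_STRICT_MEMORY_RWX is enabled: the first section
 * stays MT_MEMORY_RW, kernel text up to __start_rodata becomes
 * MT_MEMORY_RX, rodata up to __init_begin becomes MT_MEMORY_R, init text
 * up to __arch_info_begin becomes MT_MEMORY_RX, and the rest of the bank
 * is MT_MEMORY_RW. All other banks are mapped MT_MEMORY_RW in one go.
 */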
1511
Laura Abbott835d5322014-04-14 19:42:04 -07001512#ifdef CONFIG_FORCE_PAGES
1513/*
 1514 * Remap a PMD into pages.
 1515 * We split a single pmd here; none of this two-pmd nonsense.
1516 */
1517static noinline void split_pmd(pmd_t *pmd, unsigned long addr,
1518 unsigned long end, unsigned long pfn,
1519 const struct mem_type *type)
1520{
1521 pte_t *pte, *start_pte;
1522
1523 start_pte = early_alloc(PTE_HWTABLE_OFF + PTE_HWTABLE_SIZE);
1524
1525 pte = start_pte;
1526
1527 do {
1528 set_pte_ext(pte, pfn_pte(pfn, type->prot_pte), 0);
1529 pfn++;
1530 } while (pte++, addr += PAGE_SIZE, addr != end);
1531
1532 *pmd = __pmd((__pa(start_pte) + PTE_HWTABLE_OFF) | type->prot_l1);
1533 mb();
1534 flush_pmd_entry(pmd);
1535 flush_tlb_all();
1536}
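/*
 * Worked example, assuming classic 4K pages: each SECTION_SIZE (1MB)
 * mapping split above becomes 1MB / 4KB = 256 small-page entries, which
 * is exactly the SECTION_SIZE >> PAGE_SHIFT step that remap_pages()
 * advances the pfn by for every pmd it visits.
 */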
1537
1538/*
 1539 * It's significantly easier to remap memory as pages after all of it has
 1540 * been mapped. Everything is mapped as sections, so all we have to do is split them.
1541 */
1542static void __init remap_pages(void)
1543{
1544 struct memblock_region *reg;
1545
1546 for_each_memblock(memory, reg) {
1547 phys_addr_t phys_start = reg->base;
1548 phys_addr_t phys_end = reg->base + reg->size;
1549 unsigned long addr = (unsigned long)__va(phys_start);
1550 unsigned long end = (unsigned long)__va(phys_end);
1551 pmd_t *pmd = NULL;
1552 unsigned long next;
1553 unsigned long pfn = __phys_to_pfn(phys_start);
1554 bool fixup = false;
1555 unsigned long saved_start = addr;
1556
1557 if (phys_end > arm_lowmem_limit)
1558 end = (unsigned long)__va(arm_lowmem_limit);
1559 if (phys_start >= phys_end)
1560 break;
1561
1562 pmd = pmd_offset(
1563 pud_offset(pgd_offset(&init_mm, addr), addr), addr);
1564
1565#ifndef CONFIG_ARM_LPAE
1566 if (addr & SECTION_SIZE) {
1567 fixup = true;
1568 pmd_empty_section_gap((addr - SECTION_SIZE) & PMD_MASK);
1569 pmd++;
1570 }
1571
1572 if (end & SECTION_SIZE)
1573 pmd_empty_section_gap(end);
1574#endif
1575
1576 do {
1577 next = addr + SECTION_SIZE;
1578
1579 if (pmd_none(*pmd) || pmd_bad(*pmd))
1580 split_pmd(pmd, addr, next, pfn,
1581 &mem_types[MT_MEMORY]);
1582 pmd++;
1583 pfn += SECTION_SIZE >> PAGE_SHIFT;
1584
1585 } while (addr = next, addr < end);
1586
1587 if (fixup) {
1588 /*
 1589 * Put a faulting page table entry here to avoid detecting no
 1590 * pmd when accessing an odd section boundary. This
 1591 * needs to be faulting to help catch errors and avoid
 1592 * speculation.
1593 */
1594 pmd = pmd_off_k(saved_start);
1595 pmd[0] = pmd[1] & ~1;
1596 }
1597 }
1598}
1599#else
1600static void __init remap_pages(void)
1601{
1602
1603}
1604#endif
1605
Russell Kingd111e8f2006-09-27 15:27:33 +01001606/*
1607 * paging_init() sets up the page tables, initialises the zone memory
1608 * maps, and sets up the zero page, bad page and bad page tables.
1609 */
Nicolas Pitre4b5f32c2008-10-06 13:24:40 -04001610void __init paging_init(struct machine_desc *mdesc)
Russell Kingd111e8f2006-09-27 15:27:33 +01001611{
1612 void *zero_page;
1613
Marek Szyprowskid4398df2011-12-29 13:09:51 +01001614 memblock_set_current_limit(arm_lowmem_limit);
Russell King0371d3f2011-07-05 19:58:29 +01001615
Russell Kingd111e8f2006-09-27 15:27:33 +01001616 build_mem_type_table();
Nicolas Pitre4b5f32c2008-10-06 13:24:40 -04001617 prepare_page_table();
Russell Kinga2227122010-03-25 18:56:05 +00001618 map_lowmem();
Marek Szyprowskid4398df2011-12-29 13:09:51 +01001619 dma_contiguous_remap();
Laura Abbott835d5322014-04-14 19:42:04 -07001620 remap_pages();
Russell Kingd111e8f2006-09-27 15:27:33 +01001621 devicemaps_init(mdesc);
Nicolas Pitred73cd422008-09-15 16:44:55 -04001622 kmap_init();
Russell Kingd111e8f2006-09-27 15:27:33 +01001623
1624 top_pmd = pmd_off_k(0xffff0000);
1625
Russell King3abe9d32010-03-25 17:02:59 +00001626 /* allocate the zero page. */
1627 zero_page = early_alloc(PAGE_SIZE);
Russell King2778f622010-07-09 16:27:52 +01001628
Russell King8d717a52010-05-22 19:47:18 +01001629 bootmem_init();
Russell King2778f622010-07-09 16:27:52 +01001630
Russell Kingd111e8f2006-09-27 15:27:33 +01001631 empty_zero_page = virt_to_page(zero_page);
Russell King421fe932009-10-25 10:23:04 +00001632 __flush_dcache_page(NULL, empty_zero_page);
Russell Kingd111e8f2006-09-27 15:27:33 +01001633}