/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>

#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED        0
#define CPOLICY_BUFFERED        1
#define CPOLICY_WRITETHROUGH    2
#define CPOLICY_WRITEBACK       3
#define CPOLICY_WRITEALLOC      4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
        const char      policy[16];
        unsigned int    cr_mask;
        unsigned int    pmd;
        unsigned int    pte;
};

static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = L_PTE_MT_UNCACHED,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = L_PTE_MT_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = L_PTE_MT_WRITETHROUGH,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = L_PTE_MT_WRITEBACK,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = L_PTE_MT_WRITEALLOC,
        }
};
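
/*
 * Each entry above pairs the level-1 section attribute bits (.pmd) with
 * the Linux memory-type PTE bits (.pte) that implement the named write
 * policy.  .cr_mask lists the control register bits that must be cleared
 * when that policy is selected; e.g. "uncached" clears both the cache (C)
 * and write buffer (W) enables.  The array is indexed by the CPOLICY_*
 * constants defined earlier.
 */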

/*
 * These are useful for identifying cache coherency
 * problems by allowing the cache or the cache and
 * writebuffer to be turned off.  (Note: the write
 * buffer should not be on and the cache off).
 */
static void __init early_cachepolicy(char **p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(*p, cache_policies[i].policy, len) == 0) {
                        cachepolicy = i;
                        cr_alignment &= ~cache_policies[i].cr_mask;
                        cr_no_alignment &= ~cache_policies[i].cr_mask;
                        *p += len;
                        break;
                }
        }
        if (i == ARRAY_SIZE(cache_policies))
                printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
        if (cpu_architecture() >= CPU_ARCH_ARMv6) {
                printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
                cachepolicy = CPOLICY_WRITEBACK;
        }
        flush_cache_all();
        set_cr(cr_alignment);
}
__early_param("cachepolicy=", early_cachepolicy);
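
/*
 * Example (illustrative): booting with "cachepolicy=writethrough" on the
 * kernel command line makes the parser above select the "writethrough"
 * entry (CPOLICY_WRITETHROUGH), although on ARMv6 and later the check
 * above forces the policy back to writeback.  The deprecated "nocache"
 * and "nowb" options below are simply aliases that forward a fixed
 * policy string to early_cachepolicy().
 */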

static void __init early_nocache(char **__unused)
{
        char *p = "buffered";
        printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}
__early_param("nocache", early_nocache);

static void __init early_nowrite(char **__unused)
{
        char *p = "uncached";
        printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}
__early_param("nowb", early_nowrite);

static void __init early_ecc(char **p)
{
        if (memcmp(*p, "on", 2) == 0) {
                ecc_mask = PMD_PROTECTION;
                *p += 2;
        } else if (memcmp(*p, "off", 3) == 0) {
                ecc_mask = 0;
                *p += 3;
        }
}
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
void adjust_cr(unsigned long mask, unsigned long set)
{
        unsigned long flags;

        mask &= ~CR_A;

        set &= mask;

        local_irq_save(flags);

        cr_no_alignment = (cr_no_alignment & ~mask) | set;
        cr_alignment = (cr_alignment & ~mask) | set;

        set_cr((get_cr() & ~mask) | set);

        local_irq_restore(flags);
}
#endif
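
/*
 * adjust_cr() updates both saved copies of the control register and the
 * live register, but deliberately never touches the alignment-fault bit
 * (CR_A), which is managed separately (see noalign_setup() above).  A
 * hypothetical, illustrative call to set the instruction cache enable
 * bit would be:
 *
 *      adjust_cr(CR_I, CR_I);
 *
 * It is only built for UP kernels, where a single control register copy
 * is sufficient.
 */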

#define PROT_PTE_DEVICE         L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE        PMD_TYPE_SECT|PMD_SECT_AP_WRITE

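/*
 * mem_types[] describes every kind of mapping create_mapping() can build:
 * .prot_pte holds the PTE bits used when a region is mapped with
 * individual pages, .prot_l1 the level-1 descriptor bits pointing at such
 * a page table, .prot_sect the bits used when the region can be mapped
 * with 1MB sections, and .domain selects the ARM protection domain.
 * build_mem_type_table() later adjusts these templates for the CPU that
 * is actually running.
 */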
static struct mem_type mem_types[] = {
        [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
                                  L_PTE_SHARED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_S,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_CACHED] = {    /* ioremap_cached */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE | PMD_SECT_WB,
                .domain         = DOMAIN_IO,
        },
        [MT_DEVICE_WC] = {        /* ioremap_wc */
                .prot_pte       = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PROT_SECT_DEVICE,
                .domain         = DOMAIN_IO,
        },
        [MT_UNCACHED] = {
                .prot_pte       = PROT_PTE_DEVICE,
                .prot_l1        = PMD_TYPE_TABLE,
                .prot_sect      = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain         = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                                L_PTE_USER | L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
};

const struct mem_type *get_mem_type(unsigned int type)
{
        return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}
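
/*
 * get_mem_type() lets other code (the ioremap implementation, for
 * instance) look up the protection template for an MT_* index.  A hedged
 * usage sketch:
 *
 *      const struct mem_type *type = get_mem_type(MT_DEVICE);
 *      if (type)
 *              ... build the mapping using type->prot_pte ...
 *
 * A NULL return means the index was out of range.
 */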

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
        int cpu_arch = cpu_architecture();
        int i;

        if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
                if (cachepolicy > CPOLICY_BUFFERED)
                        cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
                if (cachepolicy > CPOLICY_WRITETHROUGH)
                        cachepolicy = CPOLICY_WRITETHROUGH;
#endif
        }
        if (cpu_arch < CPU_ARCH_ARMv5) {
                if (cachepolicy >= CPOLICY_WRITEALLOC)
                        cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }
#ifdef CONFIG_SMP
        cachepolicy = CPOLICY_WRITEALLOC;
#endif

        /*
         * Strip out features not present on earlier architectures.
         * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
         * without extended page tables don't have the 'Shared' bit.
         */
        if (cpu_arch < CPU_ARCH_ARMv5)
                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
        if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        mem_types[i].prot_sect &= ~PMD_SECT_S;

        /*
         * On ARMv5 and lower, bit 4 must be set for page tables (was: the
         * cache "update-able on write" bit on ARM610).  However, Xscale
         * and Xscale3 require this bit to be cleared.
         */
        if (cpu_is_xscale() || cpu_is_xsc3()) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        mem_types[i].prot_sect &= ~PMD_BIT4;
                        mem_types[i].prot_l1 &= ~PMD_BIT4;
                }
        } else if (cpu_arch < CPU_ARCH_ARMv6) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
                        if (mem_types[i].prot_sect)
                                mem_types[i].prot_sect |= PMD_BIT4;
                }
        }

        /*
         * Mark the device areas according to the CPU/architecture.
         */
        if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
                if (!cpu_is_xsc3()) {
                        /*
                         * Mark device regions on ARMv6+ as execute-never
                         * to prevent speculative instruction fetches.
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
                }
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /*
                         * For ARMv7 with TEX remapping,
                         * - shared device is SXCB=1100
                         * - nonshared device is SXCB=0100
                         * - write combine device mem is SXCB=0001
                         * (Uncached Normal memory)
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
                } else if (cpu_is_xsc3()) {
                        /*
                         * For Xscale3,
                         * - shared device is TEXCB=00101
                         * - nonshared device is TEXCB=01000
                         * - write combine device mem is TEXCB=00100
                         * (Inner/Outer Uncacheable in xsc3 parlance)
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
                } else {
                        /*
                         * For ARMv6 and ARMv7 without TEX remapping,
                         * - shared device is TEXCB=00001
                         * - nonshared device is TEXCB=01000
                         * - write combine device mem is TEXCB=00100
                         * (Uncached Normal in ARMv6 parlance).
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
                }
        } else {
                /*
                 * On others, write combining is "Uncached/Buffered"
                 */
                mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
        }

        /*
         * Now deal with the memory-type mappings
         */
        cp = &cache_policies[cachepolicy];
        vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_SMP
        /*
         * Only use write-through for non-SMP systems
         */
        if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
                vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
#endif

        /*
         * Enable CPU-specific coherency if supported.
         * (Only available on XSC3 at the moment.)
         */
        if (arch_is_coherent() && cpu_is_xsc3())
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;

        /*
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
                 */
                mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

#ifdef CONFIG_SMP
                /*
                 * Mark memory with the "shared" attribute for SMP systems
                 */
                user_pgprot |= L_PTE_SHARED;
                kern_pgprot |= L_PTE_SHARED;
                vecs_pgprot |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
        }

        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
                protection_map[i] = __pgprot(v | user_pgprot);
        }

        mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
        mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

        pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | L_PTE_WRITE |
                                 L_PTE_EXEC | kern_pgprot);

        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_ROM].prot_sect |= cp->pmd;

        switch (cp->pmd) {
        case PMD_SECT_WT:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
                break;
        case PMD_SECT_WB:
        case PMD_SECT_WBWA:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
                break;
        }
        printk("Memory policy: ECC %sabled, Data cache %s\n",
                ecc_mask ? "en" : "dis", cp->policy);

        for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                struct mem_type *t = &mem_types[i];
                if (t->prot_l1)
                        t->prot_l1 |= PMD_DOMAIN(t->domain);
                if (t->prot_sect)
                        t->prot_sect |= PMD_DOMAIN(t->domain);
        }
}

#define vectors_base()  (vectors_high() ? 0xffff0000 : 0)
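
/*
 * vectors_high() reports whether the control register's "high vectors"
 * bit is set, in which case the exception vectors live at 0xffff0000
 * rather than address 0.  vectors_base() is therefore the virtual
 * address at which the vector page must be mapped.
 */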

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
{
        pte_t *pte;

        if (pmd_none(*pmd)) {
                pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
                __pmd_populate(pmd, __pa(pte) | type->prot_l1);
        }

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}
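
/*
 * Note on the allocation above: on classic ARM each PTE page holds two
 * tables back to back - the hardware page table and the Linux view of it
 * carrying the extra status bits - which is why 2 * PTRS_PER_PTE entries
 * are allocated in one go.  set_pte_ext() keeps both copies in sync.
 */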

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
                                      unsigned long end, unsigned long phys,
                                      const struct mem_type *type)
{
        pmd_t *pmd = pmd_offset(pgd, addr);

        /*
         * Try a section mapping - end, addr and phys must all be aligned
         * to a section boundary.  Note that PMDs refer to the individual
         * L1 entries, whereas PGDs refer to a group of L1 entries making
         * up one logical pointer to an L2 table.
         */
        if (((addr | end | phys) & ~SECTION_MASK) == 0) {
                pmd_t *p = pmd;

                if (addr & SECTION_SIZE)
                        pmd++;

                do {
                        *pmd = __pmd(phys | type->prot_sect);
                        phys += SECTION_SIZE;
                } while (pmd++, addr += SECTION_SIZE, addr != end);

                flush_pmd_entry(p);
        } else {
                /*
                 * No need to loop; pte's aren't interested in the
                 * individual L1 entries.
                 */
                alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
        }
}
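
/*
 * Worked example (illustrative): mapping a 4MB region whose virtual
 * address, physical address and length are all 1MB aligned takes the
 * section path above and writes four level-1 section entries, with no
 * level-2 table at all.  A region that starts or ends off a 1MB boundary
 * instead falls through to alloc_init_pte() and is built from 4kB pages.
 */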

static void __init create_36bit_mapping(struct map_desc *md,
                                        const struct mem_type *type)
{
        unsigned long phys, addr, length, end;
        pgd_t *pgd;

        addr = md->virtual;
        phys = (unsigned long)__pfn_to_phys(md->pfn);
        length = PAGE_ALIGN(md->length);

        if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
                printk(KERN_ERR "MM: CPU does not support supersection "
                       "mapping for 0x%08llx at 0x%08lx\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        /* N.B. ARMv6 supersections are only defined to work with domain 0.
         *      Since domain assignments can in fact be arbitrary, the
         *      'domain == 0' check below is required to ensure that ARMv6
         *      supersections are only allocated for domain 0 regardless
         *      of the actual domain assignments in use.
         */
        if (type->domain) {
                printk(KERN_ERR "MM: invalid domain in supersection "
                       "mapping for 0x%08llx at 0x%08lx\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
                printk(KERN_ERR "MM: cannot create mapping for "
                       "0x%08llx at 0x%08lx invalid alignment\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        /*
         * Shift bits [35:32] of address into bits [23:20] of PMD
         * (See ARMv6 spec).
         */
        phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

        pgd = pgd_offset_k(addr);
        end = addr + length;
        do {
                pmd_t *pmd = pmd_offset(pgd, addr);
                int i;

                for (i = 0; i < 16; i++)
                        *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

                addr += SUPERSECTION_SIZE;
                phys += SUPERSECTION_SIZE;
                pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
        } while (addr != end);
}
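
/*
 * A supersection is a 16MB level-1 mapping, so the loop above writes the
 * same descriptor into 16 consecutive level-1 entries for every 16MB
 * covered; that replication is an architectural requirement, not an
 * optimisation.  This path exists so that memory or devices with 36-bit
 * physical addresses (above 4GB) can still be mapped.
 */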

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
        unsigned long phys, addr, length, end;
        const struct mem_type *type;
        pgd_t *pgd;

        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
                       "0x%08llx at 0x%08lx in user region\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
                return;
        }

        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
                printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
                       "overlaps vmalloc space\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
        }

        type = &mem_types[md->type];

        /*
         * Catch 36-bit addresses
         */
        if (md->pfn >= 0x100000) {
                create_36bit_mapping(md, type);
                return;
        }

        addr = md->virtual & PAGE_MASK;
        phys = (unsigned long)__pfn_to_phys(md->pfn);
        length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

        if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
                       __pfn_to_phys(md->pfn), addr);
                return;
        }

        pgd = pgd_offset_k(addr);
        end = addr + length;
        do {
                unsigned long next = pgd_addr_end(addr, end);

                alloc_init_section(pgd, addr, next, phys, type);

                phys += next - addr;
                addr = next;
        } while (pgd++, addr != end);
}
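
/*
 * Hedged usage sketch (the addresses are made up): a machine's map_io()
 * routine typically fills in a struct map_desc and hands it to
 * create_mapping() or iotable_init(), e.g.
 *
 *      struct map_desc uart_map = {
 *              .virtual = 0xf8000000,                  (VA to map at)
 *              .pfn     = __phys_to_pfn(0x10000000),   (physical base)
 *              .length  = SZ_1M,
 *              .type    = MT_DEVICE,                   (index into mem_types[])
 *      };
 *      create_mapping(&uart_map);
 */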

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                create_mapping(io_desc + i);
}
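
/*
 * iotable_init() is simply the array form of the sketch above: a board
 * passes a static array of map_desc entries (its "io table") and each
 * entry is handed to create_mapping() in turn.
 */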

static unsigned long __initdata vmalloc_reserve = SZ_128M;

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes. This can be used to increase (or decrease) the vmalloc
 * area - the default is 128m.
 */
static void __init early_vmalloc(char **arg)
{
        vmalloc_reserve = memparse(*arg, arg);

        if (vmalloc_reserve < SZ_16M) {
                vmalloc_reserve = SZ_16M;
                printk(KERN_WARNING
                        "vmalloc area too small, limiting to %luMB\n",
                        vmalloc_reserve >> 20);
        }

        if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
                vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
                printk(KERN_WARNING
                        "vmalloc area is too big, limiting to %luMB\n",
                        vmalloc_reserve >> 20);
        }
}
__early_param("vmalloc=", early_vmalloc);
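
/*
 * Example (illustrative): booting with "vmalloc=256M" reserves a 256MB
 * vmalloc area instead of the default 128MB.  Requests below 16MB, or so
 * large that less than 32MB of directly mapped lowmem would remain, are
 * clamped by the checks above.
 */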

#define VMALLOC_MIN     (void *)(VMALLOC_END - vmalloc_reserve)

static void __init sanity_check_meminfo(void)
{
        int i, j;

        for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
                /*
                 * Split those memory banks which are partially overlapping
                 * the vmalloc area greatly simplifying things later.
                 */
                if (__va(bank->start) < VMALLOC_MIN &&
                    bank->size > VMALLOC_MIN - __va(bank->start)) {
                        if (meminfo.nr_banks >= NR_BANKS) {
                                printk(KERN_CRIT "NR_BANKS too low, "
                                       "ignoring high memory\n");
                        } else {
                                memmove(bank + 1, bank,
                                        (meminfo.nr_banks - i) * sizeof(*bank));
                                meminfo.nr_banks++;
                                i++;
                                bank[1].size -= VMALLOC_MIN - __va(bank->start);
                                bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
                                j++;
                        }
                        bank->size = VMALLOC_MIN - __va(bank->start);
                }
#else
                /*
                 * Check whether this memory bank would entirely overlap
                 * the vmalloc area.
                 */
                if (__va(bank->start) >= VMALLOC_MIN ||
                    __va(bank->start) < (void *)PAGE_OFFSET) {
                        printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
                               "(vmalloc region overlap).\n",
                               bank->start, bank->start + bank->size - 1);
                        continue;
                }

                /*
                 * Check whether this memory bank would partially overlap
                 * the vmalloc area.
                 */
                if (__va(bank->start + bank->size) > VMALLOC_MIN ||
                    __va(bank->start + bank->size) < __va(bank->start)) {
                        unsigned long newsize = VMALLOC_MIN - __va(bank->start);
                        printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
                               "to -%.8lx (vmalloc region overlap).\n",
                               bank->start, bank->start + bank->size - 1,
                               bank->start + newsize - 1);
                        bank->size = newsize;
                }
#endif
                j++;
        }
        meminfo.nr_banks = j;
}
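
/*
 * Worked example (illustrative, non-HIGHMEM case; the addresses are
 * hypothetical since VMALLOC_END and PAGE_OFFSET are platform defined):
 * with the default 128MB vmalloc area and VMALLOC_END at 0xf0000000,
 * VMALLOC_MIN is 0xe8000000.  A 768MB bank starting at PAGE_OFFSET
 * (0xc0000000) would reach 0xf0000000, past VMALLOC_MIN, so the second
 * check above truncates it to 640MB and prints the "Truncating RAM"
 * notice; memory the kernel cannot map directly is dropped on
 * non-HIGHMEM kernels.
 */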

static inline void prepare_page_table(void)
{
        unsigned long addr;

        /*
         * Clear out all the mappings below the kernel image.
         */
        for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
        /* The XIP kernel is mapped in the module area -- skip over it */
        addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
        for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

        /*
         * Clear out all the kernel space mappings, except for the first
         * memory bank, up to the end of the vmalloc region.
         */
        for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
             addr < VMALLOC_END; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
        unsigned long res_size = 0;

        /*
         * Register the kernel text and data with bootmem.
         * Note that this can only be in node 0.
         */
#ifdef CONFIG_XIP_KERNEL
        reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
                        BOOTMEM_DEFAULT);
#else
        reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
                        BOOTMEM_DEFAULT);
#endif

        /*
         * Reserve the page tables.  These are already in use,
         * and can only be in node 0.
         */
        reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
                        PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);

        /*
         * Hmm... This should go elsewhere, but we really really need to
         * stop things allocating the low memory; ideally we need a better
         * implementation of GFP_DMA which does not assume that DMA-able
         * memory starts at zero.
         */
        if (machine_is_integrator() || machine_is_cintegrator())
                res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

        /*
         * These should likewise go elsewhere.  They pre-reserve the
         * screen memory region at the start of main system memory.
         */
        if (machine_is_edb7211())
                res_size = 0x00020000;
        if (machine_is_p720t())
                res_size = 0x00014000;

        /* H1940 and RX3715 need to reserve this for suspend */

        if (machine_is_h1940() || machine_is_rx3715()) {
                reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
                                BOOTMEM_DEFAULT);
                reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
                                BOOTMEM_DEFAULT);
        }

#ifdef CONFIG_SA1111
        /*
         * Because of the SA1111 DMA bug, we want to preserve our
         * precious DMA-able memory...
         */
        res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
        if (res_size)
                reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
                                BOOTMEM_DEFAULT);
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function.  This means you can't use any function or debugging
 * method which may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
        struct map_desc map;
        unsigned long addr;
        void *vectors;

        /*
         * Allocate the vector page early.
         */
        vectors = alloc_bootmem_low_pages(PAGE_SIZE);

        for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

        /*
         * Map the kernel if it is XIP.
         * It is always first in the module area.
         */
#ifdef CONFIG_XIP_KERNEL
        map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
        map.virtual = MODULES_VADDR;
        map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
        map.type = MT_ROM;
        create_mapping(&map);
#endif

        /*
         * Map the cache flushing regions.
         */
#ifdef FLUSH_BASE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
        map.virtual = FLUSH_BASE;
        map.length = SZ_1M;
        map.type = MT_CACHECLEAN;
        create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
        map.virtual = FLUSH_BASE_MINICACHE;
        map.length = SZ_1M;
        map.type = MT_MINICLEAN;
        create_mapping(&map);
#endif

        /*
         * Create a mapping for the machine vectors at the high-vectors
         * location (0xffff0000).  If we aren't using high-vectors, also
         * create a mapping at the low-vectors virtual address.
         */
        map.pfn = __phys_to_pfn(virt_to_phys(vectors));
        map.virtual = 0xffff0000;
        map.length = PAGE_SIZE;
        map.type = MT_HIGH_VECTORS;
        create_mapping(&map);

        if (!vectors_high()) {
                map.virtual = 0;
                map.type = MT_LOW_VECTORS;
                create_mapping(&map);
        }

        /*
         * Ask the machine support to map in the statically mapped devices.
         */
        if (mdesc->map_io)
                mdesc->map_io();

        /*
         * Finally flush the caches and tlb to ensure that we're in a
         * consistent state wrt the writebuffer.  This also ensures that
         * any write-allocated cache lines in the vector page are written
         * back.  After this point, we can start to touch devices again.
         */
        local_flush_tlb_all();
        flush_cache_all();
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
        void *zero_page;

        build_mem_type_table();
        sanity_check_meminfo();
        prepare_page_table();
        bootmem_init();
        devicemaps_init(mdesc);

        top_pmd = pmd_off_k(0xffff0000);

        /*
         * allocate the zero page.  Note that this always succeeds and
         * returns a zeroed result.
         */
        zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
}
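
/*
 * The ordering in paging_init() matters: the mem_type templates must be
 * fixed up before any mapping is created, the meminfo banks must be
 * sanitised before the page tables are rebuilt from them, and the device
 * mappings (including the vector page) are created last, after bootmem
 * is up, so that alloc_bootmem_low_pages() can be used for the page
 * tables and the vector page itself.
 */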

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
        unsigned long base_pmdval;
        pgd_t *pgd;
        int i;

        if (current->mm && current->mm->pgd)
                pgd = current->mm->pgd;
        else
                pgd = init_mm.pgd;

        base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
                base_pmdval |= PMD_BIT4;

        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
                unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
                pmd_t *pmd;

                pmd = pmd_off(pgd, i << PGDIR_SHIFT);
                pmd[0] = __pmd(pmdval);
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }
}
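
/*
 * Each Linux pgd entry covers 2MB and is backed by a pair of 1MB hardware
 * section descriptors, which is why the loop above writes both pmd[0] and
 * pmd[1]: together they give a flat 1:1 section mapping of the user
 * address range, so the reboot code keeps running from a predictable
 * mapping while the MMU is being turned off.
 */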