/*
 *  linux/arch/arm/mm/mmu.c
 *
 *  Copyright (C) 1995-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/errno.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/mman.h>
#include <linux/nodemask.h>

#include <asm/cputype.h>
#include <asm/mach-types.h>
#include <asm/sections.h>
#include <asm/cachetype.h>
#include <asm/setup.h>
#include <asm/sizes.h>
#include <asm/tlb.h>
#include <asm/highmem.h>

#include <asm/mach/arch.h>
#include <asm/mach/map.h>

#include "mm.h"

DEFINE_PER_CPU(struct mmu_gather, mmu_gathers);

/*
 * empty_zero_page is a special page that is used for
 * zero-initialized data and COW.
 */
struct page *empty_zero_page;
EXPORT_SYMBOL(empty_zero_page);

/*
 * The pmd table for the upper-most set of pages.
 */
pmd_t *top_pmd;

#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_user;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_user);
EXPORT_SYMBOL(pgprot_kernel);

struct cachepolicy {
        const char      policy[16];
        unsigned int    cr_mask;
        unsigned int    pmd;
        unsigned int    pte;
};

static struct cachepolicy cache_policies[] __initdata = {
        {
                .policy         = "uncached",
                .cr_mask        = CR_W|CR_C,
                .pmd            = PMD_SECT_UNCACHED,
                .pte            = L_PTE_MT_UNCACHED,
        }, {
                .policy         = "buffered",
                .cr_mask        = CR_C,
                .pmd            = PMD_SECT_BUFFERED,
                .pte            = L_PTE_MT_BUFFERABLE,
        }, {
                .policy         = "writethrough",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WT,
                .pte            = L_PTE_MT_WRITETHROUGH,
        }, {
                .policy         = "writeback",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WB,
                .pte            = L_PTE_MT_WRITEBACK,
        }, {
                .policy         = "writealloc",
                .cr_mask        = 0,
                .pmd            = PMD_SECT_WBWA,
                .pte            = L_PTE_MT_WRITEALLOC,
        }
};

/*
 * These are useful for identifying cache coherency problems by allowing
 * the cache, or the cache and the write buffer, to be turned off.
 * (Note: the write buffer should not be enabled with the cache off.)
 */
static void __init early_cachepolicy(char **p)
{
        int i;

        for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
                int len = strlen(cache_policies[i].policy);

                if (memcmp(*p, cache_policies[i].policy, len) == 0) {
                        cachepolicy = i;
                        cr_alignment &= ~cache_policies[i].cr_mask;
                        cr_no_alignment &= ~cache_policies[i].cr_mask;
                        *p += len;
                        break;
                }
        }
        if (i == ARRAY_SIZE(cache_policies))
                printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
        if (cpu_architecture() >= CPU_ARCH_ARMv6) {
                printk(KERN_WARNING "Only cachepolicy=writeback supported on ARMv6 and later\n");
                cachepolicy = CPOLICY_WRITEBACK;
        }
        flush_cache_all();
        set_cr(cr_alignment);
}
__early_param("cachepolicy=", early_cachepolicy);

static void __init early_nocache(char **__unused)
{
        char *p = "buffered";
        printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}
__early_param("nocache", early_nocache);

static void __init early_nowrite(char **__unused)
{
        char *p = "uncached";
        printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
        early_cachepolicy(&p);
}
__early_param("nowb", early_nowrite);

static void __init early_ecc(char **p)
{
        if (memcmp(*p, "on", 2) == 0) {
                ecc_mask = PMD_PROTECTION;
                *p += 2;
        } else if (memcmp(*p, "off", 3) == 0) {
                ecc_mask = 0;
                *p += 3;
        }
}
__early_param("ecc=", early_ecc);

static int __init noalign_setup(char *__unused)
{
        cr_alignment &= ~CR_A;
        cr_no_alignment &= ~CR_A;
        set_cr(cr_alignment);
        return 1;
}
__setup("noalign", noalign_setup);

#ifndef CONFIG_SMP
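/*
 * Update the in-memory copies of the control register (cr_alignment and
 * cr_no_alignment) together with the live CP15 register, with IRQs
 * disabled so the three stay consistent.  CR_A is always removed from
 * the mask, so callers cannot change the alignment-fault bit here; that
 * bit is managed by the "noalign" handling above.
 */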
void adjust_cr(unsigned long mask, unsigned long set)
{
        unsigned long flags;

        mask &= ~CR_A;

        set &= mask;

        local_irq_save(flags);

        cr_no_alignment = (cr_no_alignment & ~mask) | set;
        cr_alignment = (cr_alignment & ~mask) | set;

        set_cr((get_cr() & ~mask) | set);

        local_irq_restore(flags);
}
#endif

#define PROT_PTE_DEVICE         L_PTE_PRESENT|L_PTE_YOUNG|L_PTE_DIRTY|L_PTE_WRITE
#define PROT_SECT_DEVICE        PMD_TYPE_SECT|PMD_SECT_AP_WRITE

static struct mem_type mem_types[] = {
        [MT_DEVICE] = {           /* Strongly ordered / ARMv6 shared device */
                .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_SHARED |
                             L_PTE_SHARED,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PROT_SECT_DEVICE | PMD_SECT_S,
                .domain    = DOMAIN_IO,
        },
        [MT_DEVICE_NONSHARED] = { /* ARMv6 non-shared device */
                .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_NONSHARED,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PROT_SECT_DEVICE,
                .domain    = DOMAIN_IO,
        },
        [MT_DEVICE_CACHED] = {    /* ioremap_cached */
                .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_CACHED,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PROT_SECT_DEVICE | PMD_SECT_WB,
                .domain    = DOMAIN_IO,
        },
        [MT_DEVICE_WC] = {        /* ioremap_wc */
                .prot_pte  = PROT_PTE_DEVICE | L_PTE_MT_DEV_WC,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PROT_SECT_DEVICE,
                .domain    = DOMAIN_IO,
        },
        [MT_UNCACHED] = {
                .prot_pte  = PROT_PTE_DEVICE,
                .prot_l1   = PMD_TYPE_TABLE,
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_IO,
        },
        [MT_CACHECLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MINICLEAN] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_XN | PMD_SECT_MINICACHE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_LOW_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                             L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_HIGH_VECTORS] = {
                .prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
                             L_PTE_USER | L_PTE_EXEC,
                .prot_l1   = PMD_TYPE_TABLE,
                .domain    = DOMAIN_USER,
        },
        [MT_MEMORY] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_ROM] = {
                .prot_sect = PMD_TYPE_SECT,
                .domain    = DOMAIN_KERNEL,
        },
        [MT_MEMORY_NONCACHED] = {
                .prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
                .domain    = DOMAIN_KERNEL,
        },
};

const struct mem_type *get_mem_type(unsigned int type)
{
        return type < ARRAY_SIZE(mem_types) ? &mem_types[type] : NULL;
}

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
static void __init build_mem_type_table(void)
{
        struct cachepolicy *cp;
        unsigned int cr = get_cr();
        unsigned int user_pgprot, kern_pgprot, vecs_pgprot;
        int cpu_arch = cpu_architecture();
        int i;

        if (cpu_arch < CPU_ARCH_ARMv6) {
#if defined(CONFIG_CPU_DCACHE_DISABLE)
                if (cachepolicy > CPOLICY_BUFFERED)
                        cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
                if (cachepolicy > CPOLICY_WRITETHROUGH)
                        cachepolicy = CPOLICY_WRITETHROUGH;
#endif
        }
        if (cpu_arch < CPU_ARCH_ARMv5) {
                if (cachepolicy >= CPOLICY_WRITEALLOC)
                        cachepolicy = CPOLICY_WRITEBACK;
                ecc_mask = 0;
        }
#ifdef CONFIG_SMP
        cachepolicy = CPOLICY_WRITEALLOC;
#endif

        /*
         * Strip out features not present on earlier architectures.
         * Pre-ARMv5 CPUs don't have TEX bits.  Pre-ARMv6 CPUs or those
         * without extended page tables don't have the 'Shared' bit.
         */
        if (cpu_arch < CPU_ARCH_ARMv5)
                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        mem_types[i].prot_sect &= ~PMD_SECT_TEX(7);
        if ((cpu_arch < CPU_ARCH_ARMv6 || !(cr & CR_XP)) && !cpu_is_xsc3())
                for (i = 0; i < ARRAY_SIZE(mem_types); i++)
                        mem_types[i].prot_sect &= ~PMD_SECT_S;

        /*
         * On ARMv5 and lower, bit 4 must be set in page tables (it was
         * the cache "update-able on write" bit on ARM610).  However,
         * Xscale and Xscale3 require this bit to be cleared.
         */
        if (cpu_is_xscale() || cpu_is_xsc3()) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        mem_types[i].prot_sect &= ~PMD_BIT4;
                        mem_types[i].prot_l1 &= ~PMD_BIT4;
                }
        } else if (cpu_arch < CPU_ARCH_ARMv6) {
                for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                        if (mem_types[i].prot_l1)
                                mem_types[i].prot_l1 |= PMD_BIT4;
                        if (mem_types[i].prot_sect)
                                mem_types[i].prot_sect |= PMD_BIT4;
                }
        }

        /*
         * Mark the device areas according to the CPU/architecture.
         */
        if (cpu_is_xsc3() || (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP))) {
                if (!cpu_is_xsc3()) {
                        /*
                         * Mark device regions on ARMv6+ as execute-never
                         * to prevent speculative instruction fetches.
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_CACHED].prot_sect |= PMD_SECT_XN;
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_XN;
                }
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /*
                         * For ARMv7 with TEX remapping,
                         * - shared device is SXCB=1100
                         * - nonshared device is SXCB=0100
                         * - write combine device mem is SXCB=0001
                         * (Uncached Normal memory)
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1);
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(1);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
                } else if (cpu_is_xsc3()) {
                        /*
                         * For Xscale3,
                         * - shared device is TEXCB=00101
                         * - nonshared device is TEXCB=01000
                         * - write combine device mem is TEXCB=00100
                         * (Inner/Outer Uncacheable in xsc3 parlance)
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_TEX(1) | PMD_SECT_BUFFERED;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
                } else {
                        /*
                         * For ARMv6 and ARMv7 without TEX remapping,
                         * - shared device is TEXCB=00001
                         * - nonshared device is TEXCB=01000
                         * - write combine device mem is TEXCB=00100
                         * (Uncached Normal in ARMv6 parlance).
                         */
                        mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;
                        mem_types[MT_DEVICE_NONSHARED].prot_sect |= PMD_SECT_TEX(2);
                        mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_TEX(1);
                }
        } else {
                /*
                 * On others, write combining is "Uncached/Buffered".
                 */
                mem_types[MT_DEVICE_WC].prot_sect |= PMD_SECT_BUFFERABLE;
        }

        /*
         * Now deal with the memory-type mappings.
         */
        cp = &cache_policies[cachepolicy];
        vecs_pgprot = kern_pgprot = user_pgprot = cp->pte;

#ifndef CONFIG_SMP
        /*
         * Only use write-through for non-SMP systems.
         */
        if (cpu_arch >= CPU_ARCH_ARMv5 && cachepolicy > CPOLICY_WRITETHROUGH)
                vecs_pgprot = cache_policies[CPOLICY_WRITETHROUGH].pte;
#endif

        /*
         * Enable CPU-specific coherency if supported.
         * (Only available on XSC3 at the moment.)
         */
        if (arch_is_coherent() && cpu_is_xsc3())
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;

        /*
         * ARMv6 and above have extended page tables.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
                /*
                 * Mark cache clean areas and XIP ROM read only
                 * from SVC mode and no access from userspace.
                 */
                mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

#ifdef CONFIG_SMP
                /*
                 * Mark memory with the "shared" attribute for SMP systems.
                 */
                user_pgprot |= L_PTE_SHARED;
                kern_pgprot |= L_PTE_SHARED;
                vecs_pgprot |= L_PTE_SHARED;
                mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_S;
#endif
        }

        /*
         * Non-cacheable Normal - intended for memory areas that must
         * not cause dirty cache line writebacks when used.
         */
        if (cpu_arch >= CPU_ARCH_ARMv6) {
                if (cpu_arch >= CPU_ARCH_ARMv7 && (cr & CR_TRE)) {
                        /* Non-cacheable Normal is XCB = 001 */
                        mem_types[MT_MEMORY_NONCACHED].prot_sect |=
                                PMD_SECT_BUFFERED;
                } else {
                        /* For both ARMv6 and non-TEX-remapping ARMv7 */
                        mem_types[MT_MEMORY_NONCACHED].prot_sect |=
                                PMD_SECT_TEX(1);
                }
        } else {
                mem_types[MT_MEMORY_NONCACHED].prot_sect |= PMD_SECT_BUFFERABLE;
        }

        for (i = 0; i < 16; i++) {
                unsigned long v = pgprot_val(protection_map[i]);
                protection_map[i] = __pgprot(v | user_pgprot);
        }

        mem_types[MT_LOW_VECTORS].prot_pte |= vecs_pgprot;
        mem_types[MT_HIGH_VECTORS].prot_pte |= vecs_pgprot;

        pgprot_user   = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG | user_pgprot);
        pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
                                 L_PTE_DIRTY | L_PTE_WRITE |
                                 L_PTE_EXEC | kern_pgprot);

        mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
        mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
        mem_types[MT_ROM].prot_sect |= cp->pmd;

        switch (cp->pmd) {
        case PMD_SECT_WT:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
                break;
        case PMD_SECT_WB:
        case PMD_SECT_WBWA:
                mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
                break;
        }
        printk("Memory policy: ECC %sabled, Data cache %s\n",
                ecc_mask ? "en" : "dis", cp->policy);

        for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
                struct mem_type *t = &mem_types[i];
                if (t->prot_l1)
                        t->prot_l1 |= PMD_DOMAIN(t->domain);
                if (t->prot_sect)
                        t->prot_sect |= PMD_DOMAIN(t->domain);
        }
}

#define vectors_base()  (vectors_high() ? 0xffff0000 : 0)

static void __init alloc_init_pte(pmd_t *pmd, unsigned long addr,
                                  unsigned long end, unsigned long pfn,
                                  const struct mem_type *type)
{
        pte_t *pte;

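        /*
         * Note: the page allocated below holds both the hardware L2
         * tables (which the pmd entries point at) and, on this kernel's
         * classic ARM two-level layout, the Linux copies that follow
         * them - hence the room for 2 * PTRS_PER_PTE entries.
         */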
        if (pmd_none(*pmd)) {
                pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
                __pmd_populate(pmd, __pa(pte) | type->prot_l1);
        }

        pte = pte_offset_kernel(pmd, addr);
        do {
                set_pte_ext(pte, pfn_pte(pfn, __pgprot(type->prot_pte)), 0);
                pfn++;
        } while (pte++, addr += PAGE_SIZE, addr != end);
}

static void __init alloc_init_section(pgd_t *pgd, unsigned long addr,
                                      unsigned long end, unsigned long phys,
                                      const struct mem_type *type)
{
        pmd_t *pmd = pmd_offset(pgd, addr);

        /*
         * Try a section mapping - end, addr and phys must all be aligned
         * to a section boundary.  Note that PMDs refer to the individual
         * L1 entries, whereas PGDs refer to a group of L1 entries making
         * up one logical pointer to an L2 table.
         */
        if (((addr | end | phys) & ~SECTION_MASK) == 0) {
                pmd_t *p = pmd;

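                /*
                 * pmd_offset() returns the first of the pair of 1MB
                 * hardware section entries in this 2MB slot; step to
                 * the second entry when the range starts in the odd
                 * megabyte.
                 */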
                if (addr & SECTION_SIZE)
                        pmd++;

                do {
                        *pmd = __pmd(phys | type->prot_sect);
                        phys += SECTION_SIZE;
                } while (pmd++, addr += SECTION_SIZE, addr != end);

                flush_pmd_entry(p);
        } else {
                /*
                 * No need to loop; PTEs aren't interested in the
                 * individual L1 entries.
                 */
                alloc_init_pte(pmd, addr, end, __phys_to_pfn(phys), type);
        }
}

static void __init create_36bit_mapping(struct map_desc *md,
                                        const struct mem_type *type)
{
        unsigned long phys, addr, length, end;
        pgd_t *pgd;

        addr = md->virtual;
        phys = (unsigned long)__pfn_to_phys(md->pfn);
        length = PAGE_ALIGN(md->length);

        if (!(cpu_architecture() >= CPU_ARCH_ARMv6 || cpu_is_xsc3())) {
                printk(KERN_ERR "MM: CPU does not support supersection "
                       "mapping for 0x%08llx at 0x%08lx\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        /*
         * N.B. ARMv6 supersections are only defined to work with domain 0.
         *      Since domain assignments can in fact be arbitrary, the
         *      'domain == 0' check below is required to ensure that ARMv6
         *      supersections are only allocated for domain 0 regardless
         *      of the actual domain assignments in use.
         */
        if (type->domain) {
                printk(KERN_ERR "MM: invalid domain in supersection "
                       "mapping for 0x%08llx at 0x%08lx\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        if ((addr | length | __pfn_to_phys(md->pfn)) & ~SUPERSECTION_MASK) {
                printk(KERN_ERR "MM: cannot create mapping for "
                       "0x%08llx at 0x%08lx invalid alignment\n",
                       __pfn_to_phys((u64)md->pfn), addr);
                return;
        }

        /*
         * Shift bits [35:32] of address into bits [23:20] of PMD
         * (See ARMv6 spec).
         */
        phys |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);

        pgd = pgd_offset_k(addr);
        end = addr + length;
        do {
                pmd_t *pmd = pmd_offset(pgd, addr);
                int i;

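                /*
                 * The architecture requires a supersection descriptor
                 * to be repeated in each of the 16 consecutive L1
                 * entries covering its 16MB region.
                 */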
                for (i = 0; i < 16; i++)
                        *pmd++ = __pmd(phys | type->prot_sect | PMD_SECT_SUPER);

                addr += SUPERSECTION_SIZE;
                phys += SUPERSECTION_SIZE;
                pgd += SUPERSECTION_SIZE >> PGDIR_SHIFT;
        } while (addr != end);
}

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
        unsigned long phys, addr, length, end;
        const struct mem_type *type;
        pgd_t *pgd;

        if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
                printk(KERN_WARNING "BUG: not creating mapping for "
                       "0x%08llx at 0x%08lx in user region\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
                return;
        }

        if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
            md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
                printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
                       "overlaps vmalloc space\n",
                       __pfn_to_phys((u64)md->pfn), md->virtual);
        }

        type = &mem_types[md->type];

        /*
         * Catch 36-bit addresses
         */
        if (md->pfn >= 0x100000) {
                create_36bit_mapping(md, type);
                return;
        }

        addr = md->virtual & PAGE_MASK;
        phys = (unsigned long)__pfn_to_phys(md->pfn);
        length = PAGE_ALIGN(md->length + (md->virtual & ~PAGE_MASK));

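        /*
         * Memory types that supply no prot_l1 (the section-only types
         * in the table above, such as MT_ROM and the cache-clean
         * regions) cannot describe a range that is not section-aligned.
         */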
        if (type->prot_l1 == 0 && ((addr | phys | length) & ~SECTION_MASK)) {
                printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx can not "
                       "be mapped using pages, ignoring.\n",
                       __pfn_to_phys(md->pfn), addr);
                return;
        }

        pgd = pgd_offset_k(addr);
        end = addr + length;
        do {
                unsigned long next = pgd_addr_end(addr, end);

                alloc_init_section(pgd, addr, next, phys, type);

                phys += next - addr;
                addr = next;
        } while (pgd++, addr != end);
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
        int i;

        for (i = 0; i < nr; i++)
                create_mapping(io_desc + i);
}

static unsigned long __initdata vmalloc_reserve = SZ_128M;

/*
 * vmalloc=size forces the vmalloc area to be exactly 'size'
 * bytes.  This can be used to increase (or decrease) the vmalloc
 * area - the default is 128MB.
 */
static void __init early_vmalloc(char **arg)
{
        vmalloc_reserve = memparse(*arg, arg);

        if (vmalloc_reserve < SZ_16M) {
                vmalloc_reserve = SZ_16M;
                printk(KERN_WARNING
                        "vmalloc area too small, limiting to %luMB\n",
                        vmalloc_reserve >> 20);
        }

        if (vmalloc_reserve > VMALLOC_END - (PAGE_OFFSET + SZ_32M)) {
                vmalloc_reserve = VMALLOC_END - (PAGE_OFFSET + SZ_32M);
                printk(KERN_WARNING
                        "vmalloc area is too big, limiting to %luMB\n",
                        vmalloc_reserve >> 20);
        }
}
__early_param("vmalloc=", early_vmalloc);

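/* Lowest virtual address claimed by vmalloc, given the current reserve. */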
#define VMALLOC_MIN     (void *)(VMALLOC_END - vmalloc_reserve)

static void __init sanity_check_meminfo(void)
{
        int i, j;

        for (i = 0, j = 0; i < meminfo.nr_banks; i++) {
                struct membank *bank = &meminfo.bank[j];
                *bank = meminfo.bank[i];

#ifdef CONFIG_HIGHMEM
                /*
                 * Split those memory banks which are partially overlapping
                 * the vmalloc area greatly simplifying things later.
                 */
                if (__va(bank->start) < VMALLOC_MIN &&
                    bank->size > VMALLOC_MIN - __va(bank->start)) {
                        if (meminfo.nr_banks >= NR_BANKS) {
                                printk(KERN_CRIT "NR_BANKS too low, "
                                       "ignoring high memory\n");
                        } else if (cache_is_vipt_aliasing()) {
                                printk(KERN_CRIT "HIGHMEM is not yet supported "
                                       "with VIPT aliasing cache, "
                                       "ignoring high memory\n");
                        } else {
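                                /*
                                 * Duplicate the bank: the inserted copy
                                 * becomes the highmem portion starting
                                 * at VMALLOC_MIN, while the original
                                 * entry is truncated below to end there.
                                 */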
                                memmove(bank + 1, bank,
                                        (meminfo.nr_banks - i) * sizeof(*bank));
                                meminfo.nr_banks++;
                                i++;
                                bank[1].size -= VMALLOC_MIN - __va(bank->start);
                                bank[1].start = __pa(VMALLOC_MIN - 1) + 1;
                                j++;
                        }
                        bank->size = VMALLOC_MIN - __va(bank->start);
                }
#else
                /*
                 * Check whether this memory bank would entirely overlap
                 * the vmalloc area.
                 */
                if (__va(bank->start) >= VMALLOC_MIN ||
                    __va(bank->start) < (void *)PAGE_OFFSET) {
                        printk(KERN_NOTICE "Ignoring RAM at %.8lx-%.8lx "
                               "(vmalloc region overlap).\n",
                               bank->start, bank->start + bank->size - 1);
                        continue;
                }

                /*
                 * Check whether this memory bank would partially overlap
                 * the vmalloc area.
                 */
                if (__va(bank->start + bank->size) > VMALLOC_MIN ||
                    __va(bank->start + bank->size) < __va(bank->start)) {
                        unsigned long newsize = VMALLOC_MIN - __va(bank->start);
                        printk(KERN_NOTICE "Truncating RAM at %.8lx-%.8lx "
                               "to -%.8lx (vmalloc region overlap).\n",
                               bank->start, bank->start + bank->size - 1,
                               bank->start + newsize - 1);
                        bank->size = newsize;
                }
#endif
                j++;
        }
        meminfo.nr_banks = j;
}

static inline void prepare_page_table(void)
{
        unsigned long addr;

        /*
         * Clear out all the mappings below the kernel image.
         */
        for (addr = 0; addr < MODULES_VADDR; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

#ifdef CONFIG_XIP_KERNEL
        /* The XIP kernel is mapped in the module area -- skip over it */
        addr = ((unsigned long)_etext + PGDIR_SIZE - 1) & PGDIR_MASK;
#endif
        for ( ; addr < PAGE_OFFSET; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

        /*
         * Clear out all the kernel space mappings, except for the first
         * memory bank, up to the end of the vmalloc region.
         */
        for (addr = __phys_to_virt(bank_phys_end(&meminfo.bank[0]));
             addr < VMALLOC_END; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));
}

/*
 * Reserve the various regions of node 0
 */
void __init reserve_node_zero(pg_data_t *pgdat)
{
        unsigned long res_size = 0;

        /*
         * Register the kernel text and data with bootmem.
         * Note that this can only be in node 0.
         */
#ifdef CONFIG_XIP_KERNEL
        reserve_bootmem_node(pgdat, __pa(_data), _end - _data,
                             BOOTMEM_DEFAULT);
#else
        reserve_bootmem_node(pgdat, __pa(_stext), _end - _stext,
                             BOOTMEM_DEFAULT);
#endif

        /*
         * Reserve the page tables.  These are already in use,
         * and can only be in node 0.
         */
        reserve_bootmem_node(pgdat, __pa(swapper_pg_dir),
                             PTRS_PER_PGD * sizeof(pgd_t), BOOTMEM_DEFAULT);

        /*
         * Hmm... This should go elsewhere, but we really really need to
         * stop things allocating the low memory; ideally we need a better
         * implementation of GFP_DMA which does not assume that DMA-able
         * memory starts at zero.
         */
        if (machine_is_integrator() || machine_is_cintegrator())
                res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;

        /*
         * These should likewise go elsewhere.  They pre-reserve the
         * screen memory region at the start of main system memory.
         */
        if (machine_is_edb7211())
                res_size = 0x00020000;
        if (machine_is_p720t())
                res_size = 0x00014000;

        /* H1940 and RX3715 need to reserve this for suspend */
        if (machine_is_h1940() || machine_is_rx3715()) {
                reserve_bootmem_node(pgdat, 0x30003000, 0x1000,
                                     BOOTMEM_DEFAULT);
                reserve_bootmem_node(pgdat, 0x30081000, 0x1000,
                                     BOOTMEM_DEFAULT);
        }

        if (machine_is_palmld() || machine_is_palmtx()) {
                reserve_bootmem_node(pgdat, 0xa0000000, 0x1000,
                                     BOOTMEM_EXCLUSIVE);
                reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
                                     BOOTMEM_EXCLUSIVE);
        }

        if (machine_is_palmt5())
                reserve_bootmem_node(pgdat, 0xa0200000, 0x1000,
                                     BOOTMEM_EXCLUSIVE);

#ifdef CONFIG_SA1111
        /*
         * Because of the SA1111 DMA bug, we want to preserve our
         * precious DMA-able memory...
         */
        res_size = __pa(swapper_pg_dir) - PHYS_OFFSET;
#endif
        if (res_size)
                reserve_bootmem_node(pgdat, PHYS_OFFSET, res_size,
                                     BOOTMEM_DEFAULT);
}

/*
 * Set up the device mappings.  Since we clear out the page tables for all
 * mappings above VMALLOC_END, we will remove any debug device mappings.
 * This means you have to be careful how you debug this function, or any
 * called function: you can't use any function or debugging method which
 * may touch any device, otherwise the kernel _will_ crash.
 */
static void __init devicemaps_init(struct machine_desc *mdesc)
{
        struct map_desc map;
        unsigned long addr;
        void *vectors;

        /*
         * Allocate the vector page early.
         */
        vectors = alloc_bootmem_low_pages(PAGE_SIZE);

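        /*
         * Clear every mapping above VMALLOC_END; the loop terminates
         * when addr wraps around to zero.
         */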
        for (addr = VMALLOC_END; addr; addr += PGDIR_SIZE)
                pmd_clear(pmd_off_k(addr));

        /*
         * Map the kernel if it is XIP.
         * It is always first in the modulearea.
         */
#ifdef CONFIG_XIP_KERNEL
        map.pfn = __phys_to_pfn(CONFIG_XIP_PHYS_ADDR & SECTION_MASK);
        map.virtual = MODULES_VADDR;
        map.length = ((unsigned long)_etext - map.virtual + ~SECTION_MASK) & SECTION_MASK;
        map.type = MT_ROM;
        create_mapping(&map);
#endif

        /*
         * Map the cache flushing regions.
         */
#ifdef FLUSH_BASE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS);
        map.virtual = FLUSH_BASE;
        map.length = SZ_1M;
        map.type = MT_CACHECLEAN;
        create_mapping(&map);
#endif
#ifdef FLUSH_BASE_MINICACHE
        map.pfn = __phys_to_pfn(FLUSH_BASE_PHYS + SZ_1M);
        map.virtual = FLUSH_BASE_MINICACHE;
        map.length = SZ_1M;
        map.type = MT_MINICLEAN;
        create_mapping(&map);
#endif

        /*
         * Create a mapping for the machine vectors at the high-vectors
         * location (0xffff0000).  If we aren't using high-vectors, also
         * create a mapping at the low-vectors virtual address.
         */
        map.pfn = __phys_to_pfn(virt_to_phys(vectors));
        map.virtual = 0xffff0000;
        map.length = PAGE_SIZE;
        map.type = MT_HIGH_VECTORS;
        create_mapping(&map);

        if (!vectors_high()) {
                map.virtual = 0;
                map.type = MT_LOW_VECTORS;
                create_mapping(&map);
        }

        /*
         * Ask the machine support to map in the statically mapped devices.
         */
        if (mdesc->map_io)
                mdesc->map_io();

        /*
         * Finally flush the caches and tlb to ensure that we're in a
         * consistent state wrt the writebuffer.  This also ensures that
         * any write-allocated cache lines in the vector page are written
         * back.  After this point, we can start to touch devices again.
         */
        local_flush_tlb_all();
        flush_cache_all();
}

static void __init kmap_init(void)
{
#ifdef CONFIG_HIGHMEM
        pmd_t *pmd = pmd_off_k(PKMAP_BASE);
        pte_t *pte = alloc_bootmem_low_pages(2 * PTRS_PER_PTE * sizeof(pte_t));
        BUG_ON(!pmd_none(*pmd) || !pte);
        __pmd_populate(pmd, __pa(pte) | _PAGE_KERNEL_TABLE);
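        /*
         * The hardware tables occupy the first half of the page just
         * allocated; point pkmap_page_table at the Linux copies in the
         * second half, which cover the PKMAP area.
         */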
        pkmap_page_table = pte + PTRS_PER_PTE;
#endif
}

/*
 * paging_init() sets up the page tables, initialises the zone memory
 * maps, and sets up the zero page, bad page and bad page tables.
 */
void __init paging_init(struct machine_desc *mdesc)
{
        void *zero_page;

        build_mem_type_table();
        sanity_check_meminfo();
        prepare_page_table();
        bootmem_init();
        devicemaps_init(mdesc);
        kmap_init();

        top_pmd = pmd_off_k(0xffff0000);

        /*
         * Allocate the zero page.  Note that this always succeeds and
         * returns a zeroed result.
         */
        zero_page = alloc_bootmem_low_pages(PAGE_SIZE);
        empty_zero_page = virt_to_page(zero_page);
        flush_dcache_page(empty_zero_page);
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the mmu off.
 */
void setup_mm_for_reboot(char mode)
{
        unsigned long base_pmdval;
        pgd_t *pgd;
        int i;

        if (current->mm && current->mm->pgd)
                pgd = current->mm->pgd;
        else
                pgd = init_mm.pgd;

        base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
        if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ && !cpu_is_xscale())
                base_pmdval |= PMD_BIT4;

        for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
                unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
                pmd_t *pmd;

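                /*
                 * Each pgd slot covers 2MB via a pair of 1MB hardware
                 * section entries; identity-map both, with the second
                 * entry starting 1MB (1 << (PGDIR_SHIFT - 1)) further on.
                 */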
                pmd = pmd_off(pgd, i << PGDIR_SHIFT);
                pmd[0] = __pmd(pmdval);
                pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
                flush_pmd_entry(pmd);
        }
}