/*
 *  linux/arch/arm/mm/mm-armv.c
 *
 *  Copyright (C) 1998-2005 Russell King
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 *  Page table sludge for ARM v3 and v4 processor architectures.
 */
#include <linux/config.h>
#include <linux/module.h>
#include <linux/mm.h>
#include <linux/init.h>
#include <linux/bootmem.h>
#include <linux/highmem.h>
#include <linux/nodemask.h>

#include <asm/pgalloc.h>
#include <asm/page.h>
#include <asm/io.h>
#include <asm/setup.h>
#include <asm/tlbflush.h>

#include <asm/mach/map.h>

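/*
 * Cache policy selectors, ordered from least to most aggressive
 * caching; the ordering matters, as the code below clamps the
 * policy using numeric comparisons.
 */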
#define CPOLICY_UNCACHED	0
#define CPOLICY_BUFFERED	1
#define CPOLICY_WRITETHROUGH	2
#define CPOLICY_WRITEBACK	3
#define CPOLICY_WRITEALLOC	4

static unsigned int cachepolicy __initdata = CPOLICY_WRITEBACK;
static unsigned int ecc_mask __initdata = 0;
pgprot_t pgprot_kernel;

EXPORT_SYMBOL(pgprot_kernel);

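/*
 * Cached pmd pointer for the vectors area at the top of the address
 * space; set once the kernel mappings have been created.
 */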
pmd_t *top_pmd;

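/*
 * Each policy names the control register bits that must be clear for
 * it to be usable (cr_mask) and the section (pmd) and page (pte)
 * attribute bits that select it.
 */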
struct cachepolicy {
	const char	policy[16];
	unsigned int	cr_mask;
	unsigned int	pmd;
	unsigned int	pte;
};

static struct cachepolicy cache_policies[] __initdata = {
	{
		.policy		= "uncached",
		.cr_mask	= CR_W|CR_C,
		.pmd		= PMD_SECT_UNCACHED,
		.pte		= 0,
	}, {
		.policy		= "buffered",
		.cr_mask	= CR_C,
		.pmd		= PMD_SECT_BUFFERED,
		.pte		= PTE_BUFFERABLE,
	}, {
		.policy		= "writethrough",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WT,
		.pte		= PTE_CACHEABLE,
	}, {
		.policy		= "writeback",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WB,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}, {
		.policy		= "writealloc",
		.cr_mask	= 0,
		.pmd		= PMD_SECT_WBWA,
		.pte		= PTE_BUFFERABLE|PTE_CACHEABLE,
	}
};

/*
 * These are useful for identifying cache coherency problems by
 * allowing the cache or the cache and writebuffer to be turned off.
 * (Note: the write buffer should not be enabled while the cache is off.)
 */
static void __init early_cachepolicy(char **p)
{
	int i;

	for (i = 0; i < ARRAY_SIZE(cache_policies); i++) {
		int len = strlen(cache_policies[i].policy);

		if (memcmp(*p, cache_policies[i].policy, len) == 0) {
			cachepolicy = i;
			cr_alignment &= ~cache_policies[i].cr_mask;
			cr_no_alignment &= ~cache_policies[i].cr_mask;
			*p += len;
			break;
		}
	}
	if (i == ARRAY_SIZE(cache_policies))
		printk(KERN_ERR "ERROR: unknown or unsupported cache policy\n");
	flush_cache_all();
	set_cr(cr_alignment);
}

static void __init early_nocache(char **__unused)
{
	char *p = "buffered";
	printk(KERN_WARNING "nocache is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

static void __init early_nowrite(char **__unused)
{
	char *p = "uncached";
	printk(KERN_WARNING "nowb is deprecated; use cachepolicy=%s\n", p);
	early_cachepolicy(&p);
}

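/*
 * "ecc=on" requests that kernel mappings be created with the level 1
 * protection (ECC) bit set in their descriptors.
 */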
static void __init early_ecc(char **p)
{
	if (memcmp(*p, "on", 2) == 0) {
		ecc_mask = PMD_PROTECTION;
		*p += 2;
	} else if (memcmp(*p, "off", 3) == 0) {
		ecc_mask = 0;
		*p += 3;
	}
}

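/* Hook the handlers above into early command line parsing */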
__early_param("nocache", early_nocache);
__early_param("nowb", early_nowrite);
__early_param("cachepolicy=", early_cachepolicy);
__early_param("ecc=", early_ecc);

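/* "noalign" clears the A bit, disabling alignment fault checking */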
static int __init noalign_setup(char *__unused)
{
	cr_alignment &= ~CR_A;
	cr_no_alignment &= ~CR_A;
	set_cr(cr_alignment);
	return 1;
}

__setup("noalign", noalign_setup);

#define FIRST_KERNEL_PGD_NR	(FIRST_USER_PGD_NR + USER_PTRS_PER_PGD)

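/*
 * On ARM's two-level page tables the pmd is folded into the pgd, so
 * these helpers amount to locating the pgd entry covering "virt";
 * pmd_off_k() walks the kernel's init_mm tables.
 */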
static inline pmd_t *pmd_off(pgd_t *pgd, unsigned long virt)
{
	return pmd_offset(pgd, virt);
}

static inline pmd_t *pmd_off_k(unsigned long virt)
{
	return pmd_off(pgd_offset_k(virt), virt);
}

/*
 * Allocate a new level 1 page table.  On ARM this is 16kB, so we
 * need an order-2 (four page) allocation.
 */
pgd_t *get_pgd_slow(struct mm_struct *mm)
{
	pgd_t *new_pgd, *init_pgd;
	pmd_t *new_pmd, *init_pmd;
	pte_t *new_pte, *init_pte;

	new_pgd = (pgd_t *)__get_free_pages(GFP_KERNEL, 2);
	if (!new_pgd)
		goto no_pgd;

	memzero(new_pgd, FIRST_KERNEL_PGD_NR * sizeof(pgd_t));

	/*
	 * Copy over the kernel and IO PGD entries
	 */
	init_pgd = pgd_offset_k(0);
	memcpy(new_pgd + FIRST_KERNEL_PGD_NR, init_pgd + FIRST_KERNEL_PGD_NR,
	       (PTRS_PER_PGD - FIRST_KERNEL_PGD_NR) * sizeof(pgd_t));

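	/*
	 * Write the new entries back to RAM so the hardware table
	 * walker sees them even where it does not snoop the data cache.
	 */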
	clean_dcache_area(new_pgd, PTRS_PER_PGD * sizeof(pgd_t));

	if (!vectors_high()) {
		/*
		 * On ARM, first page must always be allocated since it
		 * contains the machine vectors.
		 */
		new_pmd = pmd_alloc(mm, new_pgd, 0);
		if (!new_pmd)
			goto no_pmd;

		new_pte = pte_alloc_map(mm, new_pmd, 0);
		if (!new_pte)
			goto no_pte;

		init_pmd = pmd_offset(init_pgd, 0);
		init_pte = pte_offset_map_nested(init_pmd, 0);
		set_pte(new_pte, *init_pte);
		pte_unmap_nested(init_pte);
		pte_unmap(new_pte);
	}

	return new_pgd;

no_pte:
	pmd_free(new_pmd);
no_pmd:
	free_pages((unsigned long)new_pgd, 2);
no_pgd:
	return NULL;
}

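/*
 * Free a level 1 page table, together with the level 2 table that
 * get_pgd_slow() may have allocated for the vectors page at the
 * bottom of the address space.
 */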
void free_pgd_slow(pgd_t *pgd)
{
	pmd_t *pmd;
	struct page *pte;

	if (!pgd)
		return;

	/* pgd is always present and good */
	pmd = pmd_off(pgd, 0);
	if (pmd_none(*pmd))
		goto free;
	if (pmd_bad(*pmd)) {
		pmd_ERROR(*pmd);
		pmd_clear(pmd);
		goto free;
	}

	pte = pmd_page(*pmd);
	pmd_clear(pmd);
	dec_page_state(nr_page_table_pages);
	pte_lock_deinit(pte);
	pte_free(pte);
	pmd_free(pmd);
free:
	free_pages((unsigned long) pgd, 2);
}

/*
 * Create a SECTION PGD entry between VIRT and PHYS with protection
 * PROT (which carries the domain bits).  This operates on half-
 * pgdir entry increments.
 */
static inline void
alloc_init_section(unsigned long virt, unsigned long phys, int prot)
{
	pmd_t *pmdp = pmd_off_k(virt);

	if (virt & (1 << 20))
		pmdp++;

	*pmdp = __pmd(phys | prot);
	flush_pmd_entry(pmdp);
}

/*
 * Create a SUPER SECTION PGD between VIRT and PHYS with protection
 * PROT.  A supersection covers 16MB, expressed as 16 identical
 * level 1 entries.
 */
static inline void
alloc_init_supersection(unsigned long virt, unsigned long phys, int prot)
{
	int i;

	for (i = 0; i < 16; i++) {
		alloc_init_section(virt, phys, prot | PMD_SECT_SUPER);

		virt += (PGDIR_SIZE / 2);
	}
}

/*
 * Add a PAGE mapping between VIRT and PHYS with protection PROT
 * (the domain bits travel in PROT_L1).  Note that due to the way we
 * map the PTEs, we must allocate two PTE_SIZE'd blocks - one for the
 * Linux pte table, and one for the hardware pte table.
 */
static inline void
alloc_init_page(unsigned long virt, unsigned long phys, unsigned int prot_l1, pgprot_t prot)
{
	pmd_t *pmdp = pmd_off_k(virt);
	pte_t *ptep;

	if (pmd_none(*pmdp)) {
		ptep = alloc_bootmem_low_pages(2 * PTRS_PER_PTE *
					       sizeof(pte_t));

		__pmd_populate(pmdp, __pa(ptep) | prot_l1);
	}
	ptep = pte_offset_kernel(pmdp, virt);

	set_pte(ptep, pfn_pte(phys >> PAGE_SHIFT, prot));
}

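/*
 * Protection and domain attributes for each memory type: prot_pte
 * for the individual page table entries, prot_l1 for the level 1
 * entry pointing at a pte table, and prot_sect for section mappings.
 */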
struct mem_types {
	unsigned int	prot_pte;
	unsigned int	prot_l1;
	unsigned int	prot_sect;
	unsigned int	domain;
};

static struct mem_types mem_types[] __initdata = {
	[MT_DEVICE] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_IO,
	},
	[MT_CACHECLEAN] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_MINICLEAN] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_MINICACHE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_LOW_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_HIGH_VECTORS] = {
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_USER | L_PTE_EXEC,
		.prot_l1   = PMD_TYPE_TABLE,
		.domain    = DOMAIN_USER,
	},
	[MT_MEMORY] = {
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_AP_WRITE,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_ROM] = {
		.prot_sect = PMD_TYPE_SECT,
		.domain    = DOMAIN_KERNEL,
	},
	[MT_IXP2000_DEVICE] = { /* IXP2400 requires XCB=101 for on-chip I/O */
		.prot_pte  = L_PTE_PRESENT | L_PTE_YOUNG | L_PTE_DIRTY |
				L_PTE_WRITE,
		.prot_l1   = PMD_TYPE_TABLE,
		.prot_sect = PMD_TYPE_SECT | PMD_SECT_UNCACHED |
				PMD_SECT_AP_WRITE | PMD_SECT_BUFFERABLE |
				PMD_SECT_TEX(1),
		.domain    = DOMAIN_IO,
	}
};

/*
 * Adjust the PMD section entries according to the CPU in use.
 */
void __init build_mem_type_table(void)
{
	struct cachepolicy *cp;
	unsigned int cr = get_cr();
	unsigned int user_pgprot, kern_pgprot;
	int cpu_arch = cpu_architecture();
	int i;

#if defined(CONFIG_CPU_DCACHE_DISABLE)
	if (cachepolicy > CPOLICY_BUFFERED)
		cachepolicy = CPOLICY_BUFFERED;
#elif defined(CONFIG_CPU_DCACHE_WRITETHROUGH)
	if (cachepolicy > CPOLICY_WRITETHROUGH)
		cachepolicy = CPOLICY_WRITETHROUGH;
#endif
	if (cpu_arch < CPU_ARCH_ARMv5) {
		if (cachepolicy >= CPOLICY_WRITEALLOC)
			cachepolicy = CPOLICY_WRITEBACK;
		ecc_mask = 0;
	}

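	/*
	 * ARMv5TEJ and earlier require bit 4 to be set in level 1
	 * descriptors (on ARMv6 with extended page tables it becomes XN,
	 * handled below).
	 */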
	if (cpu_arch <= CPU_ARCH_ARMv5TEJ) {
		for (i = 0; i < ARRAY_SIZE(mem_types); i++) {
			if (mem_types[i].prot_l1)
				mem_types[i].prot_l1 |= PMD_BIT4;
			if (mem_types[i].prot_sect)
				mem_types[i].prot_sect |= PMD_BIT4;
		}
	}

	cp = &cache_policies[cachepolicy];
	kern_pgprot = user_pgprot = cp->pte;

	/*
	 * ARMv6 and above have extended page tables.
	 */
	if (cpu_arch >= CPU_ARCH_ARMv6 && (cr & CR_XP)) {
		/*
		 * bit 4 becomes XN which we must clear for the
		 * kernel memory mapping.
		 */
		mem_types[MT_MEMORY].prot_sect &= ~PMD_BIT4;
		mem_types[MT_ROM].prot_sect &= ~PMD_BIT4;

		/*
		 * Mark cache clean areas and XIP ROM read-only for SVC
		 * mode, with no access from userspace.
		 */
		mem_types[MT_ROM].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_MINICLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_APX|PMD_SECT_AP_WRITE;

		/*
		 * Mark the device area as "shared device"
		 */
		mem_types[MT_DEVICE].prot_pte |= L_PTE_BUFFERABLE;
		mem_types[MT_DEVICE].prot_sect |= PMD_SECT_BUFFERED;

		/*
		 * User pages need to be mapped with the ASID
		 * (iow, non-global)
		 */
		user_pgprot |= L_PTE_ASID;

#ifdef CONFIG_SMP
		/*
		 * Mark memory with the "shared" attribute for SMP systems
		 */
		user_pgprot |= L_PTE_SHARED;
		kern_pgprot |= L_PTE_SHARED;
		mem_types[MT_MEMORY].prot_sect |= PMD_SECT_S;
#endif
	}

	for (i = 0; i < 16; i++) {
		unsigned long v = pgprot_val(protection_map[i]);
		v = (v & ~(L_PTE_BUFFERABLE|L_PTE_CACHEABLE)) | user_pgprot;
		protection_map[i] = __pgprot(v);
	}

	mem_types[MT_LOW_VECTORS].prot_pte |= kern_pgprot;
	mem_types[MT_HIGH_VECTORS].prot_pte |= kern_pgprot;

	if (cpu_arch >= CPU_ARCH_ARMv5) {
#ifndef CONFIG_SMP
		/*
		 * Only use write-through for non-SMP systems
		 */
		mem_types[MT_LOW_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
		mem_types[MT_HIGH_VECTORS].prot_pte &= ~L_PTE_BUFFERABLE;
#endif
	} else {
		mem_types[MT_MINICLEAN].prot_sect &= ~PMD_SECT_TEX(1);
	}

	pgprot_kernel = __pgprot(L_PTE_PRESENT | L_PTE_YOUNG |
				 L_PTE_DIRTY | L_PTE_WRITE |
				 L_PTE_EXEC | kern_pgprot);

	mem_types[MT_LOW_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_HIGH_VECTORS].prot_l1 |= ecc_mask;
	mem_types[MT_MEMORY].prot_sect |= ecc_mask | cp->pmd;
	mem_types[MT_ROM].prot_sect |= cp->pmd;

	switch (cp->pmd) {
	case PMD_SECT_WT:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WT;
		break;
	case PMD_SECT_WB:
	case PMD_SECT_WBWA:
		mem_types[MT_CACHECLEAN].prot_sect |= PMD_SECT_WB;
		break;
	}
	printk("Memory policy: ECC %sabled, Data cache %s\n",
		ecc_mask ? "en" : "dis", cp->policy);
}

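/* The vectors sit at 0xffff0000 when the CP15 c1 V bit is set, else at 0 */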
#define vectors_base()	(vectors_high() ? 0xffff0000 : 0)

/*
 * Create the page directory entries and any necessary
 * page tables for the mapping specified by `md'.  We
 * are able to cope here with varying sizes and address
 * offsets, and we take full advantage of sections and
 * supersections.
 */
void __init create_mapping(struct map_desc *md)
{
	unsigned long virt, length;
	int prot_sect, prot_l1, domain;
	pgprot_t prot_pte;
	unsigned long off = (u32)__pfn_to_phys(md->pfn);

	if (md->virtual != vectors_base() && md->virtual < TASK_SIZE) {
		printk(KERN_WARNING "BUG: not creating mapping for "
		       "0x%08llx at 0x%08lx in user region\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
		return;
	}

	if ((md->type == MT_DEVICE || md->type == MT_ROM) &&
	    md->virtual >= PAGE_OFFSET && md->virtual < VMALLOC_END) {
		printk(KERN_WARNING "BUG: mapping for 0x%08llx at 0x%08lx "
		       "overlaps vmalloc space\n",
		       __pfn_to_phys((u64)md->pfn), md->virtual);
	}

	domain	  = mem_types[md->type].domain;
	prot_pte  = __pgprot(mem_types[md->type].prot_pte);
	prot_l1   = mem_types[md->type].prot_l1 | PMD_DOMAIN(domain);
	prot_sect = mem_types[md->type].prot_sect | PMD_DOMAIN(domain);

	/*
	 * Catch 36-bit addresses
	 */
	if (md->pfn >= 0x100000) {
		if (domain) {
			printk(KERN_ERR "MM: invalid domain in supersection "
			       "mapping for 0x%08llx at 0x%08lx\n",
			       __pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}
		if ((md->virtual | md->length | __pfn_to_phys(md->pfn))
		    & ~SUPERSECTION_MASK) {
			printk(KERN_ERR "MM: cannot create mapping for "
			       "0x%08llx at 0x%08lx invalid alignment\n",
			       __pfn_to_phys((u64)md->pfn), md->virtual);
			return;
		}

		/*
		 * Shift bits [35:32] of address into bits [23:20] of PMD
		 * (See ARMv6 spec).
		 */
		off |= (((md->pfn >> (32 - PAGE_SHIFT)) & 0xF) << 20);
	}

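	/*
	 * Bias "off" such that virt + off yields the physical address
	 * for any virtual address within the mapping.
	 */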
	virt = md->virtual;
	off -= virt;
	length = md->length;

	if (mem_types[md->type].prot_l1 == 0 &&
	    (virt & 0xfffff || (virt + off) & 0xfffff || (virt + length) & 0xfffff)) {
		printk(KERN_WARNING "BUG: map for 0x%08lx at 0x%08lx cannot "
		       "be mapped using pages, ignoring.\n",
		       __pfn_to_phys(md->pfn), md->virtual);
		return;
	}

	while ((virt & 0xfffff || (virt + off) & 0xfffff) && length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt += PAGE_SIZE;
		length -= PAGE_SIZE;
	}

	/* N.B.	ARMv6 supersections are only defined to work with domain 0.
	 *	Since domain assignments can in fact be arbitrary, the
	 *	'domain == 0' check below is required to ensure that ARMv6
	 *	supersections are only allocated for domain 0 regardless
	 *	of the actual domain assignments in use.
	 */
	if (cpu_architecture() >= CPU_ARCH_ARMv6 && domain == 0) {
		/*
		 * Align to supersection boundary if !high pages.
		 * High pages have already been checked for proper
		 * alignment above and they will fail the SUPERSECTION_MASK
		 * check because of the way the address is encoded into
		 * offset.
		 */
		if (md->pfn <= 0x100000) {
			while ((virt & ~SUPERSECTION_MASK ||
				(virt + off) & ~SUPERSECTION_MASK) &&
				length >= (PGDIR_SIZE / 2)) {
				alloc_init_section(virt, virt + off, prot_sect);

				virt += (PGDIR_SIZE / 2);
				length -= (PGDIR_SIZE / 2);
			}
		}

		while (length >= SUPERSECTION_SIZE) {
			alloc_init_supersection(virt, virt + off, prot_sect);

			virt += SUPERSECTION_SIZE;
			length -= SUPERSECTION_SIZE;
		}
	}

	/*
	 * A section mapping covers half a "pgdir" entry.
	 */
	while (length >= (PGDIR_SIZE / 2)) {
		alloc_init_section(virt, virt + off, prot_sect);

		virt += (PGDIR_SIZE / 2);
		length -= (PGDIR_SIZE / 2);
	}

	while (length >= PAGE_SIZE) {
		alloc_init_page(virt, virt + off, prot_l1, prot_pte);

		virt += PAGE_SIZE;
		length -= PAGE_SIZE;
	}
}

/*
 * In order to soft-boot, we need to insert a 1:1 mapping in place of
 * the user-mode pages.  This will then ensure that we have predictable
 * results when turning the MMU off.
 */
void setup_mm_for_reboot(char mode)
{
	unsigned long base_pmdval;
	pgd_t *pgd;
	int i;

	if (current->mm && current->mm->pgd)
		pgd = current->mm->pgd;
	else
		pgd = init_mm.pgd;

	base_pmdval = PMD_SECT_AP_WRITE | PMD_SECT_AP_READ | PMD_TYPE_SECT;
	if (cpu_architecture() <= CPU_ARCH_ARMv5TEJ)
		base_pmdval |= PMD_BIT4;

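	/* Identity-map the user address range with 1MB sections */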
	for (i = 0; i < FIRST_USER_PGD_NR + USER_PTRS_PER_PGD; i++, pgd++) {
		unsigned long pmdval = (i << PGDIR_SHIFT) | base_pmdval;
		pmd_t *pmd;

		pmd = pmd_off(pgd, i << PGDIR_SHIFT);
		pmd[0] = __pmd(pmdval);
		pmd[1] = __pmd(pmdval + (1 << (PGDIR_SHIFT - 1)));
		flush_pmd_entry(pmd);
	}
}

/*
 * Create the architecture specific mappings
 */
void __init iotable_init(struct map_desc *io_desc, int nr)
{
	int i;

	for (i = 0; i < nr; i++)
		create_mapping(io_desc + i);
}