/*
 * srmmu.c: SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/bitext.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>
#include <asm/leon.h>

#include <asm/btfixup.h>

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

extern unsigned long page_kernel;

static pgd_t *srmmu_swapper_pg_dir;

#ifdef CONFIG_SMP
#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif

BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)

int flush_page_for_dma_global = 1;

#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
#endif

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;
/*
 * In general all page table modifications should use the V8 atomic
 * swap instruction.  This ensures the mmu and the cpu are in sync
 * with respect to ref/mod bits in the page tables.
 */
static inline unsigned long srmmu_swap(unsigned long *addr, unsigned long value)
{
	__asm__ __volatile__("swap [%2], %0" : "=&r" (value) : "0" (value), "r" (addr));
	return value;
}

static inline void srmmu_set_pte(pte_t *ptep, pte_t pteval)
{
	srmmu_swap((unsigned long *)ptep, pte_val(pteval));
}
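
/*
 * Why swap and not a plain store: the MMU may set the ref/mod bits of
 * a PTE on its own at any time.  A non-atomic read-modify-write could
 * have such a hardware update land between the CPU's load and store
 * and be lost; the single swap operation leaves no such window.
 */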

/* The very generic SRMMU page table operations. */
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
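
/*
 * Spelling out the arithmetic of the comment above: with the usual 4K
 * pages on sparc32 (PAGE_SHIFT == 12) the bitmap granularity is
 * 1 << (12 - 4) = 256 bytes, and with 4-byte hardware PTEs that is
 * 256 / 4 = 64 PTEs per bitmap bit.
 */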

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static unsigned long srmmu_pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() just copy the PTE over
		 * directly.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

static struct page *srmmu_pmd_page(pmd_t pmd)
{
	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long srmmu_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd)) ? ~0 : (unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }


static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }

static inline int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }

static inline void srmmu_pte_clear(pte_t *ptep)
{ srmmu_set_pte(ptep, __pte(0)); }

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

static inline int srmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline void srmmu_pmd_clear(pmd_t *pmdp)
{
	int i;

	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++)
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], __pte(0));
}

static inline int srmmu_pgd_none(pgd_t pgd)
{ return !(pgd_val(pgd) & 0xFFFFFFF); }

static inline int srmmu_pgd_bad(pgd_t pgd)
{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline void srmmu_pgd_clear(pgd_t *pgdp)
{ srmmu_set_pte((pte_t *)pgdp, __pte(0)); }

static inline pte_t srmmu_pte_wrprotect(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkclean(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkold(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_REF);}

static inline pte_t srmmu_pte_mkwrite(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkdirty(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkyoung(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_REF);}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }
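
/*
 * A note on the recurring ">> 4": the SRMMU uses 36-bit physical
 * addresses and keeps the physical page number in PTE bits [31:8],
 * leaving the low byte for type/permission bits.  For a page-aligned
 * address, pa >> 4 equals (pa >> 12) << 8, i.e. it drops PA[35:12]
 * straight into PTE[31:8]; the (space << 28) in srmmu_mk_pte_io()
 * supplies the top four address bits for I/O space.
 */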

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

static inline void srmmu_pgd_set(pgd_t *pgdp, pmd_t *pmdp)
{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }

static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}
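
/*
 * The loops above and below fan one software pmd out over several
 * hardware page table pointers: a Linux PTE page holds PTRS_PER_PTE
 * entries, but a hardware SRMMU level-3 table holds only
 * SRMMU_REAL_PTRS_PER_PTE (64) of them, so each pmd_t carries 16 PTPs
 * in pmdv[] pointing at consecutive 64-entry chunks of the same page
 * (see the "bend the rule" comment in srmmu_inherit_prom_mappings()).
 */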

static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }

/* to find an entry in a top-level page table... */
static inline pgd_t *srmmu_pgd_offset(struct mm_struct *mm, unsigned long address)
{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }

/* Find an entry in the second-level page table.. */
static inline pmd_t *srmmu_pmd_offset(pgd_t *dir, unsigned long address)
{
	return (pmd_t *) srmmu_pgd_page(*dir) +
	    ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *srmmu_pte_offset(pmd_t *dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

static unsigned long srmmu_swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static unsigned long srmmu_swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;
	int minsz = 1 << SRMMU_NOCACHE_BITMAP_SHIFT;

	if (size < minsz) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = minsz;
	}
	if (size & (minsz - 1)) {
		printk("Size 0x%x unaligned in nocache request\n", size);
		size += minsz - 1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}
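
/*
 * Callers that need natural alignment simply pass the same value for
 * size and align; srmmu_paging_init() does this for the context table,
 * which must be physically aligned to its own size.
 */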

static unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}
static void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr+size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		       vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < (1 << SRMMU_NOCACHE_BITMAP_SHIFT)) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size-1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
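
/*
 * A worked example of the sizing above, assuming the usual
 * SRMMU_NOCACHE_ALCRATIO of 64 (the exact value lives in the headers):
 * sysmemavail is in KB, so 64MB of RAM gives 65536 / 64 / 1024 * 256 =
 * 256 nocache pages, i.e. 1MB of nocache per 64MB of system RAM,
 * subject to the min/max clamps.
 */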

static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
					     SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
		pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		srmmu_set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

static inline pgd_t *srmmu_get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

static void srmmu_free_pgd_fast(pgd_t *pgd)
{
	srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
}

static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
}

static void srmmu_pmd_free(pmd_t *pmd)
{
	srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
static pte_t *
srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}

static pgtable_t
srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
	pgtable_page_ctor(page);
	return page;
}

static void srmmu_free_pte_fast(pte_t *pte)
{
	srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
}

static void srmmu_pte_free(pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
	srmmu_free_nocache(p, PTE_SIZE);
}
/*
 * Allocate an MMU context for mm: take one off the free list if
 * possible, otherwise steal an in-use context (avoiding old_mm's),
 * flushing the previous owner's cache and TLB before reassigning it.
 */
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}


static void srmmu_switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
			    struct task_struct *tsk, int cpu)
{
	if (mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/*
	 * I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	srmmu_set_pte(ptep, __pte(tmp));
}

static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
			     unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	srmmu_pte_clear(ptep);
}

static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/*
 * On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool.  As a side effect we are putting a little too much pressure
 * on the gfp() subsystem.  This setup also makes the logic of the
 * iommu mapping code a lot easier, as we can transparently handle
 * mappings on the kernel stack without the special code we needed
 * on the sun4c.
 */
static struct thread_info *srmmu_alloc_thread_info_node(int node)
{
	struct thread_info *ret;

	ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
						     THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
	if (ret)
		memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */

	return ret;
}

static void srmmu_free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/*
 * Workaround, until we find out what is going on with Swift.  When low
 * on memory, it sometimes loops in fault/handle_mm_fault (incl.
 * flush_tlb_page), finds the page already in the page tables, then
 * faults again on the same instruction.  I really don't understand it;
 * I have checked it, the contexts are right, flush_tlb_all is done as
 * well, and it faults again... Strange. -jj
 *
 * The following code is deadwood that may be necessary when
 * we start to make precise page flushes again. --zaitcev
 */
static void swift_update_mmu_cache(struct vm_area_struct *vma, unsigned long address, pte_t *ptep)
{
#if 0
	static unsigned long last;
	unsigned int val;
	/* unsigned int n; */

	if (address == last) {
		val = srmmu_hwprobe(address);
		if (val != 0 && pte_val(*ptep) != val) {
			printk("swift_update_mmu_cache: "
			       "addr %lx put %08x probed %08x from %p\n",
			       address, pte_val(*ptep), val,
			       __builtin_return_address(0));
			srmmu_flush_whole_tlb();
		}
	}
	last = address;
#endif
}

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0	/* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
		/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* Cypress flushes. */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for (faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if ((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}

static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while (faddr);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	start &= SRMMU_REAL_PMD_MASK;
	while (start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_REAL_PMD_SIZE;
	}
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while (line != page);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

/* Cypress is copy-back, at least that is how we configure it. */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while (line != page);
}

/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/* Cypress has a unified L2 VIPT cache, in which both instructions and
 * data are stored.  It does not have an onboard icache of any sort,
 * therefore no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%2, [%0] %3\n\t"
	"sta	%%g0, [%1] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__(
	"lda	[%0] %5, %%g5\n\t"
	"sta	%1, [%0] %5\n"
	"1:\n\t"
	"subcc	%3, %4, %3\n\t"
	"bne	1b\n\t"
	" sta	%%g0, [%2 + %3] %6\n\t"
	"sta	%%g5, [%0] %5\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
	: "g5", "cc");
	FLUSH_END
}

static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%1, [%0] %3\n\t"
	"sta	%%g0, [%2] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (srmmu_pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			srmmu_pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}
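
/*
 * The two skeleton builders above differ only in address translation:
 * the early variant runs before the nocache area is mapped, so every
 * access to a freshly allocated table goes through __nocache_fix() to
 * reach it via the kernel's existing linear mapping; the later variant
 * can dereference the nocache virtual addresses directly.
 */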

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly, which is what most
 * other OSes do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0;	/* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while (start <= end) {
		if (start == 0)
			break;	/* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if (!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if (srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if (!(start & ~(SRMMU_PGDIR_MASK))) {
			if (srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			    prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece.  Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}
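
/*
 * Each do_large_mapping() call installs a single top-level PTE, so a
 * memory bank is always mapped in SRMMU_PGDIR_SIZE (16MB) chunks: the
 * bank's bounds are rounded to 16MB above, and any bank falling outside
 * the low PAGE_OFFSET..PAGE_OFFSET+SRMMU_MAXMEM window is skipped.
 */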

static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}

static inline void map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}

	BTFIXUPSET_SIMM13(user_ptrs_per_pgd, PAGE_OFFSET / SRMMU_PGDIR_SIZE);
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536;	/* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_flush_tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
	pte = srmmu_pte_offset(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}
| 1361 | |
| 1362 | static void srmmu_mmu_info(struct seq_file *m) |
| 1363 | { |
| 1364 | seq_printf(m, |
| 1365 | "MMU type\t: %s\n" |
| 1366 | "contexts\t: %d\n" |
| 1367 | "nocache total\t: %ld\n" |
| 1368 | "nocache used\t: %d\n", |
| 1369 | srmmu_name, |
| 1370 | num_contexts, |
| 1371 | srmmu_nocache_size, |
| 1372 | srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT); |
| 1373 | } |
| 1374 | |
| 1375 | static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte) |
| 1376 | { |
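| | /* Nothing to do: the SRMMU walks page tables in hardware, so there |
| | * is no software TLB-refill state to update on a fault. */ |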
| 1377 | } |
| 1378 | |
| 1379 | static void srmmu_destroy_context(struct mm_struct *mm) |
| 1380 | { |
| 1381 | |
| 1382 | if(mm->context != NO_CONTEXT) { |
| 1383 | flush_cache_mm(mm); |
| 1384 | srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir); |
| 1385 | flush_tlb_mm(mm); |
| 1386 | spin_lock(&srmmu_context_spinlock); |
| 1387 | free_context(mm->context); |
| 1388 | spin_unlock(&srmmu_context_spinlock); |
| 1389 | mm->context = NO_CONTEXT; |
| 1390 | } |
| 1391 | } |
| 1392 | |
| 1393 | /* Init various srmmu chip types. */ |
| 1394 | static void __init srmmu_is_bad(void) |
| 1395 | { |
| 1396 | prom_printf("Could not determine SRMMU chip type.\n"); |
| 1397 | prom_halt(); |
| 1398 | } |
| 1399 | |
| 1400 | static void __init init_vac_layout(void) |
| 1401 | { |
Andres Salomon | 8d12556 | 2010-10-08 14:18:11 -0700 | [diff] [blame] | 1402 | phandle nd; |
| 1403 | int cache_lines; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1404 | char node_str[128]; |
| 1405 | #ifdef CONFIG_SMP |
| 1406 | int cpu = 0; |
| 1407 | unsigned long max_size = 0; |
| 1408 | unsigned long min_line_size = 0x10000000; |
| 1409 | #endif |
| 1410 | |
| 1411 | nd = prom_getchild(prom_root_node); |
| 1412 | while((nd = prom_getsibling(nd)) != 0) { |
| 1413 | prom_getstring(nd, "device_type", node_str, sizeof(node_str)); |
| 1414 | if(!strcmp(node_str, "cpu")) { |
| 1415 | vac_line_size = prom_getint(nd, "cache-line-size"); |
| 1416 | if (vac_line_size == -1) { |
| 1417 | prom_printf("can't determine cache-line-size, " |
| 1418 | "halting.\n"); |
| 1419 | prom_halt(); |
| 1420 | } |
| 1421 | cache_lines = prom_getint(nd, "cache-nlines"); |
| 1422 | if (cache_lines == -1) { |
| 1423 | prom_printf("can't determine cache-nlines, halting.\n"); |
| 1424 | prom_halt(); |
| 1425 | } |
| 1426 | |
| 1427 | vac_cache_size = cache_lines * vac_line_size; |
| 1428 | #ifdef CONFIG_SMP |
| 1429 | if(vac_cache_size > max_size) |
| 1430 | max_size = vac_cache_size; |
| 1431 | if(vac_line_size < min_line_size) |
| 1432 | min_line_size = vac_line_size; |
Bob Breuer | a54123e | 2006-03-23 22:36:19 -0800 | [diff] [blame] | 1433 | /* FIXME: CPUs not contiguous!! */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1434 | cpu++; |
Rusty Russell | ec7c14b | 2009-03-16 14:40:24 +1030 | [diff] [blame] | 1435 | if (cpu >= nr_cpu_ids || !cpu_online(cpu)) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1436 | break; |
| 1437 | #else |
| 1438 | break; |
| 1439 | #endif |
| 1440 | } |
| 1441 | } |
| 1442 | if(nd == 0) { |
| 1443 | prom_printf("No CPU nodes found, halting.\n"); |
| 1444 | prom_halt(); |
| 1445 | } |
| 1446 | #ifdef CONFIG_SMP |
| 1447 | vac_cache_size = max_size; |
| 1448 | vac_line_size = min_line_size; |
| 1449 | #endif |
| 1450 | printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n", |
| 1451 | (int)vac_cache_size, (int)vac_line_size); |
| 1452 | } |
| 1453 | |
Al Viro | 409832f | 2008-11-22 17:33:54 +0000 | [diff] [blame] | 1454 | static void __cpuinit poke_hypersparc(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1455 | { |
| 1456 | volatile unsigned long clear; |
| 1457 | unsigned long mreg = srmmu_get_mmureg(); |
| 1458 | |
| 1459 | hyper_flush_unconditional_combined(); |
| 1460 | |
| 1461 | mreg &= ~(HYPERSPARC_CWENABLE); |
| 1462 | mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE); |
| 1463 | mreg |= (HYPERSPARC_CMODE); |
| 1464 | |
| 1465 | srmmu_set_mmureg(mreg); |
| 1466 | |
| 1467 | #if 0 /* XXX I think this is bad news... -DaveM */ |
| 1468 | hyper_clear_all_tags(); |
| 1469 | #endif |
| 1470 | |
| 1471 | put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE); |
| 1472 | hyper_flush_whole_icache(); |
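| | /* Reading the fault address/status registers clears any latched |
| | * fault state; the volatile temporary keeps the compiler from |
| | * discarding the loads. */ |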
| 1473 | clear = srmmu_get_faddr(); |
| 1474 | clear = srmmu_get_fstatus(); |
| 1475 | } |
| 1476 | |
| 1477 | static void __init init_hypersparc(void) |
| 1478 | { |
| 1479 | srmmu_name = "ROSS HyperSparc"; |
| 1480 | srmmu_modtype = HyperSparc; |
| 1481 | |
| 1482 | init_vac_layout(); |
| 1483 | |
| 1484 | is_hypersparc = 1; |
| 1485 | |
| 1486 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); |
| 1487 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); |
| 1488 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); |
| 1489 | BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM); |
| 1490 | BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM); |
| 1491 | BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM); |
| 1492 | BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM); |
| 1493 | |
| 1494 | BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM); |
| 1495 | BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM); |
| 1496 | BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM); |
| 1497 | BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM); |
| 1498 | |
| 1499 | BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM); |
| 1500 | BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM); |
| 1501 | BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP); |
| 1502 | |
| 1503 | |
| 1504 | poke_srmmu = poke_hypersparc; |
| 1505 | |
| 1506 | hypersparc_setup_blockops(); |
| 1507 | } |
| 1508 | |
Al Viro | 409832f | 2008-11-22 17:33:54 +0000 | [diff] [blame] | 1509 | static void __cpuinit poke_cypress(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1510 | { |
| 1511 | unsigned long mreg = srmmu_get_mmureg(); |
| 1512 | unsigned long faddr, tagval; |
| 1513 | volatile unsigned long cypress_sucks; |
| 1514 | volatile unsigned long clear; |
| 1515 | |
| 1516 | clear = srmmu_get_faddr(); |
| 1517 | clear = srmmu_get_fstatus(); |
| 1518 | |
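| | /* With the cache disabled, zero every tag to invalidate it; with |
| | * the cache enabled, scan the tags and touch each modified+valid |
| | * line so it is pushed back to memory before copy-back is enabled. */ |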
| 1519 | if (!(mreg & CYPRESS_CENABLE)) { |
| 1520 | for(faddr = 0x0; faddr < 0x10000; faddr += 0x20) { /* 0x20-byte tag stride, matching the enabled-cache loop below */ |
| 1521 | __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t" |
| 1522 | "sta %%g0, [%0] %2\n\t" : : |
| 1523 | "r" (faddr), "r" (0x40000), |
| 1524 | "i" (ASI_M_DATAC_TAG)); |
| 1525 | } |
| 1526 | } else { |
| 1527 | for(faddr = 0; faddr < 0x10000; faddr += 0x20) { |
| 1528 | __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" : |
| 1529 | "=r" (tagval) : |
| 1530 | "r" (faddr), "r" (0x40000), |
| 1531 | "i" (ASI_M_DATAC_TAG)); |
| 1532 | |
| 1533 | /* If modified and valid, kick it. */ |
| 1534 | if((tagval & 0x60) == 0x60) |
| 1535 | cypress_sucks = *(unsigned long *) |
| 1536 | (0xf0020000 + faddr); |
| 1537 | } |
| 1538 | } |
| 1539 | |
| 1540 | /* And one more, for our good neighbor, Mr. Broken Cypress. */ |
| 1541 | clear = srmmu_get_faddr(); |
| 1542 | clear = srmmu_get_fstatus(); |
| 1543 | |
| 1544 | mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE); |
| 1545 | srmmu_set_mmureg(mreg); |
| 1546 | } |
| 1547 | |
| 1548 | static void __init init_cypress_common(void) |
| 1549 | { |
| 1550 | init_vac_layout(); |
| 1551 | |
| 1552 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); |
| 1553 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); |
| 1554 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); |
| 1555 | BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM); |
| 1556 | BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM); |
| 1557 | BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM); |
| 1558 | BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM); |
| 1559 | |
| 1560 | BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM); |
| 1561 | BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM); |
| 1562 | BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM); |
| 1563 | BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM); |
| 1564 | |
| 1565 | |
| 1566 | BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM); |
| 1567 | BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP); |
| 1568 | BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP); |
| 1569 | |
| 1570 | poke_srmmu = poke_cypress; |
| 1571 | } |
| 1572 | |
| 1573 | static void __init init_cypress_604(void) |
| 1574 | { |
| 1575 | srmmu_name = "ROSS Cypress-604(UP)"; |
| 1576 | srmmu_modtype = Cypress; |
| 1577 | init_cypress_common(); |
| 1578 | } |
| 1579 | |
| 1580 | static void __init init_cypress_605(unsigned long mrev) |
| 1581 | { |
| 1582 | srmmu_name = "ROSS Cypress-605(MP)"; |
| 1583 | if(mrev == 0xe) { |
| 1584 | srmmu_modtype = Cypress_vE; |
| 1585 | hwbug_bitmask |= HWBUG_COPYBACK_BROKEN; |
| 1586 | } else { |
| 1587 | if(mrev == 0xd) { |
| 1588 | srmmu_modtype = Cypress_vD; |
| 1589 | hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN; |
| 1590 | } else { |
| 1591 | srmmu_modtype = Cypress; |
| 1592 | } |
| 1593 | } |
| 1594 | init_cypress_common(); |
| 1595 | } |
| 1596 | |
Al Viro | 409832f | 2008-11-22 17:33:54 +0000 | [diff] [blame] | 1597 | static void __cpuinit poke_swift(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1598 | { |
| 1599 | unsigned long mreg; |
| 1600 | |
| 1601 | /* Clear any crap from the cache or else... */ |
| 1602 | swift_flush_cache_all(); |
| 1603 | |
| 1604 | /* Enable I & D caches */ |
| 1605 | mreg = srmmu_get_mmureg(); |
| 1606 | mreg |= (SWIFT_IE | SWIFT_DE); |
| 1607 | /* |
| 1608 | * The Swift branch folding logic is completely broken. At |
| 1609 | * trap time, if things are just right, it can mistakenly |
| 1610 | * think that a trap is coming from kernel mode when in fact |
| 1611 | * it is coming from user mode (it mis-executes the branch in |
| 1612 | * the trap code). So you see things like crashme completely |
| 1613 | * hosing your machine, which is simply unacceptable. Turn |
| 1614 | * this shit off... nice job Fujitsu. |
| 1615 | */ |
| 1616 | mreg &= ~(SWIFT_BF); |
| 1617 | srmmu_set_mmureg(mreg); |
| 1618 | } |
| 1619 | |
| 1620 | #define SWIFT_MASKID_ADDR 0x10003018 |
| 1621 | static void __init init_swift(void) |
| 1622 | { |
| 1623 | unsigned long swift_rev; |
| 1624 | |
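| | /* Read the Swift mask revision: a bypass-ASI load from the mask ID |
| | * register, with the revision in the top byte (hence the srl by 0x18). */ |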
| 1625 | __asm__ __volatile__("lda [%1] %2, %0\n\t" |
| 1626 | "srl %0, 0x18, %0\n\t" : |
| 1627 | "=r" (swift_rev) : |
| 1628 | "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS)); |
| 1629 | srmmu_name = "Fujitsu Swift"; |
| 1630 | switch(swift_rev) { |
| 1631 | case 0x11: |
| 1632 | case 0x20: |
| 1633 | case 0x23: |
| 1634 | case 0x30: |
| 1635 | srmmu_modtype = Swift_lots_o_bugs; |
| 1636 | hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN); |
| 1637 | /* |
| 1638 | * Gee George, I wonder why Sun is so hush-hush about |
| 1639 | * this hardware bug... really braindamaged stuff going |
| 1640 | * on here. However I think we can find a way to avoid |
| 1641 | * all of the workaround overhead under Linux. Basically, |
| 1642 | * any page fault can cause kernel pages to become user |
| 1643 | * accessible (the mmu gets confused and clears some of |
| 1644 | * the ACC bits in kernel ptes). Aha, sounds pretty |
| 1645 | * horrible eh? But wait, after extensive testing it appears |
| 1646 | * that if you use pgd_t level large kernel pte's (like the |
| 1647 | * 4MB pages on the Pentium) the bug does not get tripped |
| 1648 | * at all. This avoids almost all of the major overhead. |
| 1649 | * Welcome to a world where your vendor tells you to, |
| 1650 | * "apply this kernel patch" instead of "sorry for the |
| 1651 | * broken hardware, send it back and we'll give you |
| 1652 | * properly functioning parts." |
| 1653 | */ |
| 1654 | break; |
| 1655 | case 0x25: |
| 1656 | case 0x31: |
| 1657 | srmmu_modtype = Swift_bad_c; |
| 1658 | hwbug_bitmask |= HWBUG_KERN_CBITBROKEN; |
| 1659 | /* |
| 1660 | * You see Sun allude to this hardware bug but never |
| 1661 | * admit things directly; they'll say things like, |
| 1662 | * "the Swift chip cache problems" or similar. |
| 1663 | */ |
| 1664 | break; |
| 1665 | default: |
| 1666 | srmmu_modtype = Swift_ok; |
| 1667 | break; |
Joe Perches | 6cb79b3 | 2011-06-03 14:45:23 +0000 | [diff] [blame^] | 1668 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1669 | |
| 1670 | BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM); |
| 1671 | BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM); |
| 1672 | BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM); |
| 1673 | BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM); |
| 1674 | |
| 1675 | |
| 1676 | BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM); |
| 1677 | BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM); |
| 1678 | BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM); |
| 1679 | BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM); |
| 1680 | |
| 1681 | BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM); |
| 1682 | BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM); |
| 1683 | BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM); |
| 1684 | |
| 1685 | BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM); |
| 1686 | |
| 1687 | flush_page_for_dma_global = 0; |
| 1688 | |
| 1689 | /* |
| 1690 | * Are you now convinced that the Swift is one of the |
| 1691 | * biggest VLSI abortions of all time? Bravo Fujitsu! |
| 1692 | * Fujitsu, the !#?!%$'d up processor people. I bet if |
| 1693 | * you examined the microcode of the Swift you'd find |
| 1694 | * XXX's all over the place. |
| 1695 | */ |
| 1696 | poke_srmmu = poke_swift; |
| 1697 | } |
| 1698 | |
| 1699 | static void turbosparc_flush_cache_all(void) |
| 1700 | { |
| 1701 | flush_user_windows(); |
| 1702 | turbosparc_idflash_clear(); |
| 1703 | } |
| 1704 | |
| 1705 | static void turbosparc_flush_cache_mm(struct mm_struct *mm) |
| 1706 | { |
| 1707 | FLUSH_BEGIN(mm) |
| 1708 | flush_user_windows(); |
| 1709 | turbosparc_idflash_clear(); |
| 1710 | FLUSH_END |
| 1711 | } |
| 1712 | |
| 1713 | static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
| 1714 | { |
| 1715 | FLUSH_BEGIN(vma->vm_mm) |
| 1716 | flush_user_windows(); |
| 1717 | turbosparc_idflash_clear(); |
| 1718 | FLUSH_END |
| 1719 | } |
| 1720 | |
| 1721 | static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page) |
| 1722 | { |
| 1723 | FLUSH_BEGIN(vma->vm_mm) |
| 1724 | flush_user_windows(); |
| 1725 | if (vma->vm_flags & VM_EXEC) |
| 1726 | turbosparc_flush_icache(); |
| 1727 | turbosparc_flush_dcache(); |
| 1728 | FLUSH_END |
| 1729 | } |
| 1730 | |
| 1731 | /* TurboSparc is copy-back if we turn that mode on, but doing so does not work. */ |
| 1732 | static void turbosparc_flush_page_to_ram(unsigned long page) |
| 1733 | { |
| 1734 | #ifdef TURBOSPARC_WRITEBACK |
| 1735 | volatile unsigned long clear; |
| 1736 | |
| 1737 | if (srmmu_hwprobe(page)) |
| 1738 | turbosparc_flush_page_cache(page); |
| 1739 | clear = srmmu_get_fstatus(); |
| 1740 | #endif |
| 1741 | } |
| 1742 | |
| 1743 | static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr) |
| 1744 | { |
| 1745 | } |
| 1746 | |
| 1747 | static void turbosparc_flush_page_for_dma(unsigned long page) |
| 1748 | { |
| 1749 | turbosparc_flush_dcache(); |
| 1750 | } |
| 1751 | |
| 1752 | static void turbosparc_flush_tlb_all(void) |
| 1753 | { |
| 1754 | srmmu_flush_whole_tlb(); |
| 1755 | } |
| 1756 | |
| 1757 | static void turbosparc_flush_tlb_mm(struct mm_struct *mm) |
| 1758 | { |
| 1759 | FLUSH_BEGIN(mm) |
| 1760 | srmmu_flush_whole_tlb(); |
| 1761 | FLUSH_END |
| 1762 | } |
| 1763 | |
| 1764 | static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end) |
| 1765 | { |
| 1766 | FLUSH_BEGIN(vma->vm_mm) |
| 1767 | srmmu_flush_whole_tlb(); |
| 1768 | FLUSH_END |
| 1769 | } |
| 1770 | |
| 1771 | static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page) |
| 1772 | { |
| 1773 | FLUSH_BEGIN(vma->vm_mm) |
| 1774 | srmmu_flush_whole_tlb(); |
| 1775 | FLUSH_END |
| 1776 | } |
| 1777 | |
| 1778 | |
Al Viro | 409832f | 2008-11-22 17:33:54 +0000 | [diff] [blame] | 1779 | static void __cpuinit poke_turbosparc(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1780 | { |
| 1781 | unsigned long mreg = srmmu_get_mmureg(); |
| 1782 | unsigned long ccreg; |
| 1783 | |
| 1784 | /* Clear any crap from the cache or else... */ |
| 1785 | turbosparc_flush_cache_all(); |
| 1786 | mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */ |
| 1787 | mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */ |
| 1788 | srmmu_set_mmureg(mreg); |
| 1789 | |
| 1790 | ccreg = turbosparc_get_ccreg(); |
| 1791 | |
| 1792 | #ifdef TURBOSPARC_WRITEBACK |
| 1793 | ccreg |= (TURBOSPARC_SNENABLE); /* Do DVMA snooping in Dcache */ |
| 1794 | ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE); |
| 1795 | /* Write-back D-cache, emulate VLSI |
| 1796 | * abortion number three, not number one */ |
| 1797 | #else |
| 1798 | /* For now let's play safe, optimize later */ |
| 1799 | ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE); |
| 1800 | /* Do DVMA snooping in Dcache, Write-thru D-cache */ |
| 1801 | ccreg &= ~(TURBOSPARC_uS2); |
| 1802 | /* Emulate VLSI abortion number three, not number one */ |
| 1803 | #endif |
| 1804 | |
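| | /* The low three bits of ccreg appear to describe the external (SE) |
| | * cache: leave it off for the no-cache and test-mode encodings, |
| | * enable it for everything else. */ |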
| 1805 | switch (ccreg & 7) { |
| 1806 | case 0: /* No SE cache */ |
| 1807 | case 7: /* Test mode */ |
| 1808 | break; |
| 1809 | default: |
| 1810 | ccreg |= (TURBOSPARC_SCENABLE); |
| 1811 | } |
| 1812 | turbosparc_set_ccreg(ccreg); |
| 1813 | |
| 1814 | mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */ |
| 1815 | mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */ |
| 1816 | srmmu_set_mmureg(mreg); |
| 1817 | } |
| 1818 | |
| 1819 | static void __init init_turbosparc(void) |
| 1820 | { |
| 1821 | srmmu_name = "Fujitsu TurboSparc"; |
| 1822 | srmmu_modtype = TurboSparc; |
| 1823 | |
| 1824 | BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM); |
| 1825 | BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM); |
| 1826 | BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM); |
| 1827 | BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM); |
| 1828 | |
| 1829 | BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM); |
| 1830 | BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM); |
| 1831 | BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM); |
| 1832 | BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM); |
| 1833 | |
| 1834 | BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM); |
| 1835 | |
| 1836 | BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP); |
| 1837 | BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM); |
| 1838 | |
| 1839 | poke_srmmu = poke_turbosparc; |
| 1840 | } |
| 1841 | |
Al Viro | 409832f | 2008-11-22 17:33:54 +0000 | [diff] [blame] | 1842 | static void __cpuinit poke_tsunami(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1843 | { |
| 1844 | unsigned long mreg = srmmu_get_mmureg(); |
| 1845 | |
| 1846 | tsunami_flush_icache(); |
| 1847 | tsunami_flush_dcache(); |
| 1848 | mreg &= ~TSUNAMI_ITD; |
| 1849 | mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB); |
| 1850 | srmmu_set_mmureg(mreg); |
| 1851 | } |
| 1852 | |
| 1853 | static void __init init_tsunami(void) |
| 1854 | { |
| 1855 | /* |
| 1856 | * Tsunami's pretty sane, Sun and TI actually got it |
| 1857 | * somewhat right this time. Fujitsu should have |
| 1858 | * taken some lessons from them. |
| 1859 | */ |
| 1860 | |
| 1861 | srmmu_name = "TI Tsunami"; |
| 1862 | srmmu_modtype = Tsunami; |
| 1863 | |
| 1864 | BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM); |
| 1865 | BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM); |
| 1866 | BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM); |
| 1867 | BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM); |
| 1868 | |
| 1869 | |
| 1870 | BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM); |
| 1871 | BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM); |
| 1872 | BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM); |
| 1873 | BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM); |
| 1874 | |
| 1875 | BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP); |
| 1876 | BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM); |
| 1877 | BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM); |
| 1878 | |
| 1879 | poke_srmmu = poke_tsunami; |
| 1880 | |
| 1881 | tsunami_setup_blockops(); |
| 1882 | } |
| 1883 | |
Al Viro | 409832f | 2008-11-22 17:33:54 +0000 | [diff] [blame] | 1884 | static void __cpuinit poke_viking(void) |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1885 | { |
| 1886 | unsigned long mreg = srmmu_get_mmureg(); |
| 1887 | static int smp_catch; |
| 1888 | |
| 1889 | if(viking_mxcc_present) { |
| 1890 | unsigned long mxcc_control = mxcc_get_creg(); |
| 1891 | |
| 1892 | mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE); |
| 1893 | mxcc_control &= ~(MXCC_CTL_RRC); |
| 1894 | mxcc_set_creg(mxcc_control); |
| 1895 | |
| 1896 | /* |
| 1897 | * We don't need memory parity checks. |
| 1898 | * XXX This is a mess, have to dig out later. ecd. |
| 1899 | viking_mxcc_turn_off_parity(&mreg, &mxcc_control); |
| 1900 | */ |
| 1901 | |
| 1902 | /* We do cache ptables on MXCC. */ |
| 1903 | mreg |= VIKING_TCENABLE; |
| 1904 | } else { |
| 1905 | unsigned long bpreg; |
| 1906 | |
| 1907 | mreg &= ~(VIKING_TCENABLE); |
| 1908 | if(smp_catch++) { |
| 1909 | /* Must disable mixed-cmd mode here for other CPUs. */ |
| 1910 | bpreg = viking_get_bpreg(); |
| 1911 | bpreg &= ~(VIKING_ACTION_MIX); |
| 1912 | viking_set_bpreg(bpreg); |
| 1913 | |
| 1914 | /* Just in case PROM does something funny. */ |
| 1915 | msi_set_sync(); |
| 1916 | } |
| 1917 | } |
| 1918 | |
| 1919 | mreg |= VIKING_SPENABLE; |
| 1920 | mreg |= (VIKING_ICENABLE | VIKING_DCENABLE); |
| 1921 | mreg |= VIKING_SBENABLE; |
| 1922 | mreg &= ~(VIKING_ACENABLE); |
| 1923 | srmmu_set_mmureg(mreg); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 1924 | } |
| 1925 | |
| 1926 | static void __init init_viking(void) |
| 1927 | { |
| 1928 | unsigned long mreg = srmmu_get_mmureg(); |
| 1929 | |
| 1930 | /* Ahhh, the viking. SRMMU VLSI abortion number two... */ |
| 1931 | if(mreg & VIKING_MMODE) { |
| 1932 | srmmu_name = "TI Viking"; |
| 1933 | viking_mxcc_present = 0; |
| 1934 | msi_set_sync(); |
| 1935 | |
| 1936 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_NORM); |
| 1937 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_NORM); |
| 1938 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_NORM); |
| 1939 | |
| 1940 | /* |
| 1941 | * We need this to make sure old viking takes no hits |
| 1942 | * on its cache for DMA snoops, to work around the |
| 1943 | * "load from non-cacheable memory" interrupt bug. |
| 1944 | * This is only necessary because of the new way in |
| 1945 | * which we use the IOMMU. |
| 1946 | */ |
| 1947 | BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM); |
| 1948 | |
| 1949 | flush_page_for_dma_global = 0; |
| 1950 | } else { |
| 1951 | srmmu_name = "TI Viking/MXCC"; |
| 1952 | viking_mxcc_present = 1; |
| 1953 | |
| 1954 | srmmu_cache_pagetables = 1; |
| 1955 | |
| 1956 | /* MXCC vikings lack the DMA snooping bug. */ |
| 1957 | BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP); |
| 1958 | } |
| 1959 | |
| 1960 | BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM); |
| 1961 | BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM); |
| 1962 | BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM); |
| 1963 | BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM); |
| 1964 | |
| 1965 | #ifdef CONFIG_SMP |
| 1966 | if (sparc_cpu_model == sun4d) { |
| 1967 | BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM); |
| 1968 | BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM); |
| 1969 | BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM); |
| 1970 | BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM); |
| 1971 | } else |
| 1972 | #endif |
| 1973 | { |
| 1974 | BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM); |
| 1975 | BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM); |
| 1976 | BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM); |
| 1977 | BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM); |
| 1978 | } |
| 1979 | |
| 1980 | BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP); |
| 1981 | BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP); |
| 1982 | |
| 1983 | poke_srmmu = poke_viking; |
| 1984 | } |
| 1985 | |
Konrad Eisele | 75d9e34 | 2009-08-17 00:13:33 +0000 | [diff] [blame] | 1986 | #ifdef CONFIG_SPARC_LEON |
| 1987 | |
| 1988 | void __init poke_leonsparc(void) |
| 1989 | { |
| 1990 | } |
| 1991 | |
| 1992 | void __init init_leon(void) |
| 1993 | { |
| 1994 | |
Kristoffer Glembo | c803ba9 | 2009-12-02 04:30:22 +0000 | [diff] [blame] | 1995 | srmmu_name = "LEON"; |
Konrad Eisele | 75d9e34 | 2009-08-17 00:13:33 +0000 | [diff] [blame] | 1996 | |
| 1997 | BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all, |
| 1998 | BTFIXUPCALL_NORM); |
| 1999 | BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all, |
| 2000 | BTFIXUPCALL_NORM); |
| 2001 | BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all, |
| 2002 | BTFIXUPCALL_NORM); |
| 2003 | BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all, |
| 2004 | BTFIXUPCALL_NORM); |
| 2005 | BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all, |
| 2006 | BTFIXUPCALL_NORM); |
| 2007 | |
| 2008 | BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM); |
| 2009 | BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM); |
| 2010 | BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM); |
| 2011 | BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM); |
| 2012 | |
| 2013 | BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all, |
| 2014 | BTFIXUPCALL_NOP); |
| 2015 | BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP); |
| 2016 | |
| 2017 | poke_srmmu = poke_leonsparc; |
| 2018 | |
| 2019 | srmmu_cache_pagetables = 0; |
| 2020 | |
| 2021 | leon_flush_during_switch = leon_flush_needed(); |
| 2022 | } |
| 2023 | #endif |
| 2024 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2025 | /* Probe for the srmmu chip version. */ |
| 2026 | static void __init get_srmmu_type(void) |
| 2027 | { |
| 2028 | unsigned long mreg, psr; |
| 2029 | unsigned long mod_typ, mod_rev, psr_typ, psr_vers; |
| 2030 | |
| 2031 | srmmu_modtype = SRMMU_INVAL_MOD; |
| 2032 | hwbug_bitmask = 0; |
| 2033 | |
| 2034 | mreg = srmmu_get_mmureg(); psr = get_psr(); |
| 2035 | mod_typ = (mreg & 0xf0000000) >> 28; |
| 2036 | mod_rev = (mreg & 0x0f000000) >> 24; |
| 2037 | psr_typ = (psr >> 28) & 0xf; |
| 2038 | psr_vers = (psr >> 24) & 0xf; |
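| | /* IMPL and VER sit in the top two nibbles of both the MMU control |
| | * register and the PSR; together the four values identify the chip. */ |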
| 2039 | |
Konrad Eisele | 75d9e34 | 2009-08-17 00:13:33 +0000 | [diff] [blame] | 2040 | /* First, check for sparc-leon. */ |
| 2041 | if (sparc_cpu_model == sparc_leon) { |
Konrad Eisele | 75d9e34 | 2009-08-17 00:13:33 +0000 | [diff] [blame] | 2042 | init_leon(); |
| 2043 | return; |
| 2044 | } |
| 2045 | |
| 2046 | /* Second, check for HyperSparc or Cypress. */ |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2047 | if(mod_typ == 1) { |
| 2048 | switch(mod_rev) { |
| 2049 | case 7: |
| 2050 | /* UP or MP Hypersparc */ |
| 2051 | init_hypersparc(); |
| 2052 | break; |
| 2053 | case 0: |
| 2054 | case 2: |
| 2055 | /* Uniprocessor Cypress */ |
| 2056 | init_cypress_604(); |
| 2057 | break; |
| 2058 | case 10: |
| 2059 | case 11: |
| 2060 | case 12: |
| 2061 | /* _REALLY OLD_ Cypress MP chips... */ |
| 2062 | case 13: |
| 2063 | case 14: |
| 2064 | case 15: |
| 2065 | /* MP Cypress mmu/cache-controller */ |
| 2066 | init_cypress_605(mod_rev); |
| 2067 | break; |
| 2068 | default: |
| 2069 | /* Some other Cypress revision, assume a 605. */ |
| 2070 | init_cypress_605(mod_rev); |
| 2071 | break; |
Joe Perches | 6cb79b3 | 2011-06-03 14:45:23 +0000 | [diff] [blame^] | 2072 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2073 | return; |
| 2074 | } |
| 2075 | |
| 2076 | /* |
| 2077 | * Now Fujitsu TurboSparc. It might happen that it is |
| 2078 | * in Swift emulation mode, so we will check later... |
| 2079 | */ |
| 2080 | if (psr_typ == 0 && psr_vers == 5) { |
| 2081 | init_turbosparc(); |
| 2082 | return; |
| 2083 | } |
| 2084 | |
| 2085 | /* Next check for Fujitsu Swift. */ |
| 2086 | if(psr_typ == 0 && psr_vers == 4) { |
Andres Salomon | 8d12556 | 2010-10-08 14:18:11 -0700 | [diff] [blame] | 2087 | phandle cpunode; |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2088 | char node_str[128]; |
| 2089 | |
| 2090 | /* Check whether this is really a TurboSparc emulating a Swift... */ |
| 2091 | cpunode = prom_getchild(prom_root_node); |
| 2092 | while((cpunode = prom_getsibling(cpunode)) != 0) { |
| 2093 | prom_getstring(cpunode, "device_type", node_str, sizeof(node_str)); |
| 2094 | if(!strcmp(node_str, "cpu")) { |
| 2095 | if (!prom_getintdefault(cpunode, "psr-implementation", 1) && |
| 2096 | prom_getintdefault(cpunode, "psr-version", 1) == 5) { |
| 2097 | init_turbosparc(); |
| 2098 | return; |
| 2099 | } |
| 2100 | break; |
| 2101 | } |
| 2102 | } |
| 2103 | |
| 2104 | init_swift(); |
| 2105 | return; |
| 2106 | } |
| 2107 | |
| 2108 | /* Now the Viking family of srmmu. */ |
| 2109 | if(psr_typ == 4 && |
| 2110 | ((psr_vers == 0) || |
| 2111 | ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) { |
| 2112 | init_viking(); |
| 2113 | return; |
| 2114 | } |
| 2115 | |
| 2116 | /* Finally the Tsunami. */ |
| 2117 | if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) { |
| 2118 | init_tsunami(); |
| 2119 | return; |
| 2120 | } |
| 2121 | |
| 2122 | /* Oh well */ |
| 2123 | srmmu_is_bad(); |
| 2124 | } |
| 2125 | |
| 2126 | /* don't laugh, static pagetables */ |
| 2127 | static void srmmu_check_pgt_cache(int low, int high) |
| 2128 | { |
| 2129 | } |
| 2130 | |
| 2131 | extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme, |
| 2132 | tsetup_mmu_patchme, rtrap_mmu_patchme; |
| 2133 | |
| 2134 | extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk, |
| 2135 | tsetup_srmmu_stackchk, srmmu_rett_stackchk; |
| 2136 | |
| 2137 | extern unsigned long srmmu_fault; |
| 2138 | |
| 2139 | #define PATCH_BRANCH(insn, dest) do { \ |
| 2140 | iaddr = &(insn); \ |
| 2141 | daddr = &(dest); \ |
| 2142 | *iaddr = SPARC_BRANCH((unsigned long) daddr, (unsigned long) iaddr); \ |
| 2143 | } while(0) |
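| | /* SPARC_BRANCH() assembles an unconditional branch from iaddr to |
| | * daddr, so storing it over the placeholder word redirects the trap |
| | * path at boot. A sketch with made-up addresses: |
| | * *iaddr = SPARC_BRANCH(0xf0041000, 0xf0040000); |
| | * encodes "ba" with the word displacement (daddr - iaddr) >> 2. */ |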
| 2144 | |
| 2145 | static void __init patch_window_trap_handlers(void) |
| 2146 | { |
| 2147 | unsigned long *iaddr, *daddr; |
| 2148 | |
| 2149 | PATCH_BRANCH(spwin_mmu_patchme, spwin_srmmu_stackchk); |
| 2150 | PATCH_BRANCH(fwin_mmu_patchme, srmmu_fwin_stackchk); |
| 2151 | PATCH_BRANCH(tsetup_mmu_patchme, tsetup_srmmu_stackchk); |
| 2152 | PATCH_BRANCH(rtrap_mmu_patchme, srmmu_rett_stackchk); |
| 2153 | PATCH_BRANCH(sparc_ttable[SP_TRAP_TFLT].inst_three, srmmu_fault); |
| 2154 | PATCH_BRANCH(sparc_ttable[SP_TRAP_DFLT].inst_three, srmmu_fault); |
| 2155 | PATCH_BRANCH(sparc_ttable[SP_TRAP_DACC].inst_three, srmmu_fault); |
| 2156 | } |
| 2157 | |
| 2158 | #ifdef CONFIG_SMP |
| 2159 | /* Local cross-calls. */ |
| 2160 | static void smp_flush_page_for_dma(unsigned long page) |
| 2161 | { |
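| | /* Cross-call the chip-local flush on the other CPUs, then run it |
| | * here as well. */ |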
| 2162 | xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page); |
| 2163 | local_flush_page_for_dma(page); |
| 2164 | } |
| 2165 | |
| 2166 | #endif |
| 2167 | |
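| | /* Non-linear file ptes: the page offset lives above |
| | * SRMMU_PTE_FILE_SHIFT and SRMMU_FILE tags the entry as a file pte |
| | * rather than a present mapping. The two helpers below round-trip, |
| | * i.e. srmmu_pte_to_pgoff(srmmu_pgoff_to_pte(off)) == off for any |
| | * offset that fits above the shift. */ |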
| 2168 | static pte_t srmmu_pgoff_to_pte(unsigned long pgoff) |
| 2169 | { |
| 2170 | return __pte((pgoff << SRMMU_PTE_FILE_SHIFT) | SRMMU_FILE); |
| 2171 | } |
| 2172 | |
| 2173 | static unsigned long srmmu_pte_to_pgoff(pte_t pte) |
| 2174 | { |
| 2175 | return pte_val(pte) >> SRMMU_PTE_FILE_SHIFT; |
| 2176 | } |
| 2177 | |
David S. Miller | 14778d9 | 2006-03-21 02:29:39 -0800 | [diff] [blame] | 2178 | static pgprot_t srmmu_pgprot_noncached(pgprot_t prot) |
| 2179 | { |
| 2180 | prot &= ~__pgprot(SRMMU_CACHE); |
| 2181 | |
| 2182 | return prot; |
| 2183 | } |
| 2184 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2185 | /* Load up routines and constants for sun4m and sun4d mmu */ |
| 2186 | void __init ld_mmu_srmmu(void) |
| 2187 | { |
| 2188 | extern void ld_mmu_iommu(void); |
| 2189 | extern void ld_mmu_iounit(void); |
| 2190 | extern void ___xchg32_sun4md(void); |
| 2191 | |
| 2192 | BTFIXUPSET_SIMM13(pgdir_shift, SRMMU_PGDIR_SHIFT); |
| 2193 | BTFIXUPSET_SETHI(pgdir_size, SRMMU_PGDIR_SIZE); |
| 2194 | BTFIXUPSET_SETHI(pgdir_mask, SRMMU_PGDIR_MASK); |
| 2195 | |
| 2196 | BTFIXUPSET_SIMM13(ptrs_per_pmd, SRMMU_PTRS_PER_PMD); |
| 2197 | BTFIXUPSET_SIMM13(ptrs_per_pgd, SRMMU_PTRS_PER_PGD); |
| 2198 | |
| 2199 | BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE)); |
Al Viro | 378e515 | 2007-07-21 19:20:34 -0700 | [diff] [blame] | 2200 | PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2201 | BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY)); |
| 2202 | BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY)); |
| 2203 | BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL)); |
| 2204 | page_kernel = pgprot_val(SRMMU_PAGE_KERNEL); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2205 | |
| 2206 | /* Functions */ |
David S. Miller | 14778d9 | 2006-03-21 02:29:39 -0800 | [diff] [blame] | 2207 | BTFIXUPSET_CALL(pgprot_noncached, srmmu_pgprot_noncached, BTFIXUPCALL_NORM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2208 | #ifndef CONFIG_SMP |
| 2209 | BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2); |
| 2210 | #endif |
| 2211 | BTFIXUPSET_CALL(do_check_pgt_cache, srmmu_check_pgt_cache, BTFIXUPCALL_NOP); |
| 2212 | |
| 2213 | BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1); |
| 2214 | BTFIXUPSET_CALL(switch_mm, srmmu_switch_mm, BTFIXUPCALL_NORM); |
| 2215 | |
| 2216 | BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM); |
| 2217 | BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM); |
Dave McCracken | 46a82b2 | 2006-09-25 23:31:48 -0700 | [diff] [blame] | 2218 | BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2219 | |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2220 | BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM); |
| 2221 | BTFIXUPSET_CALL(pte_clear, srmmu_pte_clear, BTFIXUPCALL_SWAPO0G0); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2222 | |
| 2223 | BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM); |
| 2224 | BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM); |
| 2225 | BTFIXUPSET_CALL(pmd_clear, srmmu_pmd_clear, BTFIXUPCALL_SWAPO0G0); |
| 2226 | |
| 2227 | BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM); |
| 2228 | BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM); |
| 2229 | BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM); |
| 2230 | BTFIXUPSET_CALL(pgd_clear, srmmu_pgd_clear, BTFIXUPCALL_SWAPO0G0); |
| 2231 | |
| 2232 | BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM); |
| 2233 | BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM); |
| 2234 | BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM); |
| 2235 | BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM); |
| 2236 | BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM); |
| 2237 | BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM); |
| 2238 | |
| 2239 | BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK); |
| 2240 | BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM); |
| 2241 | BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM); |
| 2242 | |
| 2243 | BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM); |
| 2244 | BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM); |
| 2245 | BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM); |
| 2246 | BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM); |
| 2247 | BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM); |
| 2248 | BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM); |
| 2249 | BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM); |
| 2250 | BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM); |
| 2251 | |
| 2252 | BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE); |
| 2253 | BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY); |
| 2254 | BTFIXUPSET_HALF(pte_youngi, SRMMU_REF); |
| 2255 | BTFIXUPSET_HALF(pte_filei, SRMMU_FILE); |
| 2256 | BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE); |
| 2257 | BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY); |
| 2258 | BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF); |
| 2259 | BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE)); |
| 2260 | BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY)); |
| 2261 | BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF)); |
| 2262 | BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP); |
| 2263 | BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM); |
| 2264 | |
| 2265 | BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM); |
| 2266 | BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM); |
| 2267 | |
| 2268 | BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM); |
| 2269 | BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM); |
| 2270 | BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM); |
| 2271 | |
| 2272 | BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM); |
| 2273 | |
Eric Dumazet | b6a8401 | 2011-03-22 16:30:42 -0700 | [diff] [blame] | 2274 | BTFIXUPSET_CALL(alloc_thread_info_node, srmmu_alloc_thread_info_node, BTFIXUPCALL_NORM); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2275 | BTFIXUPSET_CALL(free_thread_info, srmmu_free_thread_info, BTFIXUPCALL_NORM); |
| 2276 | |
| 2277 | BTFIXUPSET_CALL(pte_to_pgoff, srmmu_pte_to_pgoff, BTFIXUPCALL_NORM); |
| 2278 | BTFIXUPSET_CALL(pgoff_to_pte, srmmu_pgoff_to_pte, BTFIXUPCALL_NORM); |
| 2279 | |
| 2280 | get_srmmu_type(); |
| 2281 | patch_window_trap_handlers(); |
| 2282 | |
| 2283 | #ifdef CONFIG_SMP |
| 2284 | /* El switcheroo... */ |
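| | /* Save the chip-specific flushes as the local_* entry points, then |
| | * repoint the public entries at the smp_* wrappers that cross-call |
| | * every CPU. */ |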
| 2285 | |
| 2286 | BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all); |
| 2287 | BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm); |
| 2288 | BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range); |
| 2289 | BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page); |
| 2290 | BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all); |
| 2291 | BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm); |
| 2292 | BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range); |
| 2293 | BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page); |
| 2294 | BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram); |
| 2295 | BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns); |
| 2296 | BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma); |
| 2297 | |
| 2298 | BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM); |
| 2299 | BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM); |
| 2300 | BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM); |
| 2301 | BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM); |
Konrad Eisele | 8401707 | 2009-08-31 22:08:13 +0000 | [diff] [blame] | 2302 | if (sparc_cpu_model != sun4d && |
| 2303 | sparc_cpu_model != sparc_leon) { |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2304 | BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM); |
| 2305 | BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM); |
| 2306 | BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM); |
| 2307 | BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM); |
| 2308 | } |
| 2309 | BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM); |
| 2310 | BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM); |
| 2311 | BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM); |
David S. Miller | 64273d0 | 2008-11-26 01:00:58 -0800 | [diff] [blame] | 2312 | |
| 2313 | if (poke_srmmu == poke_viking) { |
| 2314 | /* Avoid unnecessary cross calls. */ |
| 2315 | BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all); |
| 2316 | BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm); |
| 2317 | BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range); |
| 2318 | BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page); |
| 2319 | BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram); |
| 2320 | BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns); |
| 2321 | BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma); |
| 2322 | } |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2323 | #endif |
| 2324 | |
| 2325 | if (sparc_cpu_model == sun4d) |
| 2326 | ld_mmu_iounit(); |
| 2327 | else |
| 2328 | ld_mmu_iommu(); |
| 2329 | #ifdef CONFIG_SMP |
| 2330 | if (sparc_cpu_model == sun4d) |
| 2331 | sun4d_init_smp(); |
Konrad Eisele | 8401707 | 2009-08-31 22:08:13 +0000 | [diff] [blame] | 2332 | else if (sparc_cpu_model == sparc_leon) |
| 2333 | leon_init_smp(); |
Linus Torvalds | 1da177e | 2005-04-16 15:20:36 -0700 | [diff] [blame] | 2334 | else |
| 2335 | sun4m_init_smp(); |
| 2336 | #endif |
| 2337 | } |