/*
 * srmmu.c: SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/bitext.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>
#include <asm/leon.h>

#include <asm/btfixup.h>

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

extern unsigned long page_kernel;

static pgd_t *srmmu_swapper_pg_dir;

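/*
 * On UP, FLUSH_BEGIN/FLUSH_END make the flush helpers below no-ops for an
 * mm that has never been assigned an MMU context; on SMP the check is
 * omitted and the flush is always performed.
 */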
#ifdef CONFIG_SMP
#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif

BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)

int flush_page_for_dma_global = 1;

#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
#endif

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

/* The very generic SRMMU page table operations. */
static inline int srmmu_device_memory(unsigned long x)
{
	return ((x & 0xF0000000) != 0);
}

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static unsigned long srmmu_pte_pfn(pte_t pte)
{
	if (srmmu_device_memory(pte_val(pte))) {
		/* Just return something that will cause
		 * pfn_valid() to return false.  This makes
		 * copy_one_pte() simply copy the PTE over
		 * as-is.
		 */
		return ~0UL;
	}
	return (pte_val(pte) & SRMMU_PTE_PMASK) >> (PAGE_SHIFT-4);
}

static struct page *srmmu_pmd_page(pmd_t pmd)
{

	if (srmmu_device_memory(pmd_val(pmd)))
		BUG();
	return pfn_to_page((pmd_val(pmd) & SRMMU_PTD_PMASK) >> (PAGE_SHIFT-4));
}

static inline unsigned long srmmu_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }


static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }

static inline int srmmu_pte_present(pte_t pte)
{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

static inline int srmmu_pmd_bad(pmd_t pmd)
{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pmd_present(pmd_t pmd)
{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline int srmmu_pgd_none(pgd_t pgd)
{ return !(pgd_val(pgd) & 0xFFFFFFF); }

static inline int srmmu_pgd_bad(pgd_t pgd)
{ return (pgd_val(pgd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }

static inline int srmmu_pgd_present(pgd_t pgd)
{ return ((pgd_val(pgd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }

static inline pte_t srmmu_pte_wrprotect(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkclean(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkold(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_REF);}

static inline pte_t srmmu_pte_mkwrite(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkdirty(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkyoung(pte_t pte)
{ return __pte(pte_val(pte) | SRMMU_REF);}

/*
 * Conversion functions: convert a page and protection to a page entry,
 * and a page entry and page directory to the page they refer to.
 */
static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }

static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }

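/*
 * A software pmd_t covers PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE hardware
 * page-table pointers, so setting a pmd fills every hardware PTP slot with
 * a pointer into the same nocache PTE area, advancing by one hardware PTE
 * table per slot.
 */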
static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }

/* to find an entry in a top-level page table... */
static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }

/* Find an entry in the second-level page table.. */
static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) srmmu_pgd_page(*dir) +
	    ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

static unsigned long srmmu_swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static unsigned long srmmu_swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
		printk("Size 0x%x unaligned in nocache request\n", size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
		    size, (int) srmmu_nocache_size,
		    srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}

static unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}

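/* Return an allocation to the nocache pool; vaddr and size must match the
 * original __srmmu_get_nocache() request. */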
static void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr+size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		    vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size-1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically, in proportion to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}

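/*
 * Carve the nocache pool out of bootmem, set up its allocation bitmap, and
 * map it at SRMMU_NOCACHE_VADDR, uncached unless page tables are allowed
 * to live in the cache.
 */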
static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
		pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		srmmu_set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

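/* A new pgd: clear the user entries and copy the kernel part from the
 * current kernel page directory. */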
static inline pgd_t *srmmu_get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

static void srmmu_free_pgd_fast(pgd_t *pgd)
{
	srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
}

static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
{
	return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
}

static void srmmu_pmd_free(pmd_t * pmd)
{
	srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
static pte_t *
srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
{
	return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
}

static pgtable_t
srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
	pgtable_page_ctor(page);
	return page;
}

static void srmmu_free_pte_fast(pte_t *pte)
{
	srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
}

static void srmmu_pte_free(pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
	srmmu_free_nocache(p, PTE_SIZE);
}

/*
 * Allocate an MMU context for mm: use a free one if available, otherwise
 * steal an in-use context (avoiding old_mm) after flushing its owner.
 */
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if(ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if(ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if(ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}


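/*
 * Activate mm on this CPU: allocate an MMU context on first use, point the
 * hardware context table entry at mm's pgd, then load the context register
 * (with extra icache handling for LEON and HyperSparc).
 */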
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if(mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/*
	 * I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	srmmu_set_pte(ptep, __pte(tmp));
}

static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
			     unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/*
 * On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool.  As a side effect we are putting a little too much pressure
 * on the gfp() subsystem.  This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code.
 */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ret;

	ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
						     THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
	if (ret)
		memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */

	return ret;
}

void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/*
 * Workaround, until we find what's going on with Swift. When low on memory,
 * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
 * out it is already in page tables/ fault again on the same instruction.
 * I really don't understand it, have checked it and contexts
 * are right, flush_tlb_all is done as well, and it faults again...
 * Strange. -jj
 *
 * The following code is deadwood that may be necessary when
 * we start to make precise page flushes again. --zaitcev
 */
static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
{
#if 0
	static unsigned long last;
	unsigned int val;
	/* unsigned int n; */

	if (address == last) {
		val = srmmu_hwprobe(address);
		if (val != 0 && pte_val(*ptep) != val) {
			printk("swift_update_mmu_cache: "
			    "addr %lx put %08x probed %08x from %pf\n",
			    address, pte_val(*ptep), val,
			    __builtin_return_address(0));
			srmmu_flush_whole_tlb();
		}
	}
	last = address;
#endif
}

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0  /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			 /* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* Cypress flushes. */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}

static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	start &= SRMMU_REAL_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_REAL_PMD_SIZE;
	}
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

/* Cypress is copy-back, at least that is how we configure it. */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/* Cypress has a unified L2 VIPT cache in which both instructions and data
 * are stored.  It does not have an onboard icache of any sort, therefore
 * no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%2, [%0] %3\n\t"
	"sta	%%g0, [%1] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__(
		"lda	[%0] %5, %%g5\n\t"
		"sta	%1, [%0] %5\n"
		"1:\n\t"
		"subcc	%3, %4, %3\n\t"
		"bne	1b\n\t"
		" sta	%%g0, [%2 + %3] %6\n\t"
		"sta	%%g5, [%0] %5\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
	: "g5", "cc");
	FLUSH_END
}

static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%1, [%0] %3\n\t"
	"sta	%%g0, [%2] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

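/* Build the pgd/pmd/pte skeleton for [start, end) during early boot; the
 * tables are touched through __nocache_fix() because the final nocache
 * mapping is not yet active at this stage. */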
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if(srmmu_pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			srmmu_pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			    prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if(what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if(srmmu_pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			srmmu_pmd_set(__nocache_fix(pmdp), ptep);
		}
		if(what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}

static inline void map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

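/*
 * Set up the kernel page tables: determine the number of MMU contexts,
 * size and map the nocache pool, take over the PROM's mappings, map low
 * memory with 16MB pgd-level entries, hand the context table to the MMU,
 * build the skeleton tables for the IO/DVMA/fixmap/pkmap areas, and
 * finally initialize the memory zones.
 */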
void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for(i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_flush_tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
	pte = srmmu_pte_offset(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

static void srmmu_mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

static void srmmu_destroy_context(struct mm_struct *mm)
{

	if(mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

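/* Probe the PROM cpu nodes for the virtually addressed cache geometry;
 * on SMP, use the largest cache size and the smallest line size found. */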
static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;

	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);


	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

static void __cpuinit poke_cypress(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long faddr, tagval;
	volatile unsigned long cypress_sucks;
	volatile unsigned long clear;

	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	if (!(mreg & CYPRESS_CENABLE)) {
		for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
					     "sta %%g0, [%0] %2\n\t" : :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));
		}
	} else {
		for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
					     "=r" (tagval) :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));

			/* If modified and valid, kick it. */
			if((tagval & 0x60) == 0x60)
				cypress_sucks = *(unsigned long *)
							(0xf0020000 + faddr);
		}
	}

	/* And one more, for our good neighbor, Mr. Broken Cypress. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
	srmmu_set_mmureg(mreg);
}

static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);


	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_cypress;
}

static void __init init_cypress_604(void)
{
	srmmu_name = "ROSS Cypress-604(UP)";
	srmmu_modtype = Cypress;
	init_cypress_common();
}

static void __init init_cypress_605(unsigned long mrev)
{
	srmmu_name = "ROSS Cypress-605(MP)";
	if(mrev == 0xe) {
		srmmu_modtype = Cypress_vE;
		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
	} else {
		if(mrev == 0xd) {
			srmmu_modtype = Cypress_vD;
			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
		} else {
			srmmu_modtype = Cypress;
		}
	}
	init_cypress_common();
}

static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
1579 mreg &= ~(SWIFT_BF);
1580 srmmu_set_mmureg(mreg);
1581}
1582
1583#define SWIFT_MASKID_ADDR 0x10003018
1584static void __init init_swift(void)
1585{
1586 unsigned long swift_rev;
1587
	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* The TurboSparc cache is copy-back if we turn that mode on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

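	/*
	 * The low three bits of the cache control register describe
	 * the external (SE) cache; enable it unless it is absent (0)
	 * or the chip is in test mode (7).
	 */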
	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_turbosparc;
}

static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;

	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking.  SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		/*
		 * We need this to make sure the old viking takes no hits
		 * on its cache for dma snoops, to work around the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);

		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;

		srmmu_cache_pagetables = 1;

		/* MXCC vikings lack the DMA snooping bug. */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
	}

	BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);

#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
	} else
#endif
	{
		BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
	}

	BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);

	poke_srmmu = poke_viking;
}

#ifdef CONFIG_SPARC_LEON

void __init poke_leonsparc(void)
{
}

void __init init_leon(void)
{

	srmmu_name = "LEON";

	BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
			BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
			BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);

	poke_srmmu = poke_leonsparc;

	srmmu_cache_pagetables = 0;

	leon_flush_during_switch = leon_flush_needed();
}
#endif

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

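	/*
	 * Both the SRMMU control register and the PSR carry an
	 * implementation field (bits 31:28) and a version/revision
	 * field (bits 27:24); decode all four so the MMU module can
	 * be told apart from the CPU core below.
	 */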
	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
			/* Uniprocessor Cypress */
			init_cypress_604();
			break;
		case 10:
		case 11:
		case 12:
			/* _REALLY OLD_ Cypress MP chips... */
		case 13:
		case 14:
		case 15:
			/* MP Cypress mmu/cache-controller */
			init_cypress_605(mod_rev);
			break;
		default:
			/* Some other Cypress revision, assume a 605. */
			init_cypress_605(mod_rev);
			break;
		}
		return;
	}

	/*
	 * Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Look if it is not a TurboSparc emulating Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
	tsetup_mmu_patchme, rtrap_mmu_patchme;

extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
	tsetup_srmmu_stackchk, srmmu_rett_stackchk;

#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
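	/* Cross-call the local flush to the other cpus, then run it here too. */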
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
	local_flush_page_for_dma(page);
}

#endif

/* Load up routines and constants for sun4m and sun4d mmu */
void __init ld_mmu_srmmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);
	extern void ___xchg32_sun4md(void);

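	/*
	 * Everything below registers boot-time fixups: each BTFIXUPSET_*()
	 * records the constant or routine that the patched call sites
	 * should resolve to once the btfixup pass has run.
	 */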
	BTFIXUPSET_INT(page_none, pgprot_val(SRMMU_PAGE_NONE));
	PAGE_SHARED = pgprot_val(SRMMU_PAGE_SHARED);
	BTFIXUPSET_INT(page_copy, pgprot_val(SRMMU_PAGE_COPY));
	BTFIXUPSET_INT(page_readonly, pgprot_val(SRMMU_PAGE_RDONLY));
	BTFIXUPSET_INT(page_kernel, pgprot_val(SRMMU_PAGE_KERNEL));
	page_kernel = pgprot_val(SRMMU_PAGE_KERNEL);

	/* Functions */
#ifndef CONFIG_SMP
	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif

	BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);

	BTFIXUPSET_CALL(pte_pfn, srmmu_pte_pfn, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_page, srmmu_pmd_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pgd_none, srmmu_pgd_none, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_bad, srmmu_pgd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_present, srmmu_pgd_present, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);

	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);

	BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
	BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
	BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
	BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
	BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
	BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
	BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);

	get_srmmu_type();

#ifdef CONFIG_SMP
	/* El switcheroo... */

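	/*
	 * Keep the per-cpu implementations reachable under the local_*
	 * names, then point the global entries at the smp_* wrappers
	 * that broadcast to all cpus.
	 */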
	BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
	BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
	BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
	BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
	BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
	BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
	BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
	BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
	BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
	BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
	BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);

	BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
	if (sparc_cpu_model != sun4d &&
	    sparc_cpu_model != sparc_leon) {
		BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
		BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
		BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
		BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
		BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
		BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
		BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
	}
#endif

	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
}