blob: 94dcd06632a26390e0e66703822ac02cf6a9ddb3 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * srmmu.c: SRMMU specific routines for memory management.
3 *
4 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
5 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
6 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
7 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
8 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
9 */
10
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <linux/kernel.h>
12#include <linux/mm.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070013#include <linux/vmalloc.h>
14#include <linux/pagemap.h>
15#include <linux/init.h>
16#include <linux/spinlock.h>
17#include <linux/bootmem.h>
18#include <linux/fs.h>
19#include <linux/seq_file.h>
Christoph Hellwig1eeb66a2007-05-08 00:27:03 -070020#include <linux/kdebug.h>
Robert P. J. Day949e8272009-04-24 03:58:24 +000021#include <linux/log2.h>
Tejun Heo5a0e3ad2010-03-24 17:04:11 +090022#include <linux/gfp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070023
24#include <asm/bitext.h>
25#include <asm/page.h>
26#include <asm/pgalloc.h>
27#include <asm/pgtable.h>
28#include <asm/io.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070029#include <asm/vaddrs.h>
30#include <asm/traps.h>
31#include <asm/smp.h>
32#include <asm/mbus.h>
33#include <asm/cache.h>
34#include <asm/oplib.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070035#include <asm/asi.h>
36#include <asm/msi.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070037#include <asm/mmu_context.h>
38#include <asm/io-unit.h>
39#include <asm/cacheflush.h>
40#include <asm/tlbflush.h>
41
42/* Now the cpu specific definitions. */
43#include <asm/viking.h>
44#include <asm/mxcc.h>
45#include <asm/ross.h>
46#include <asm/tsunami.h>
47#include <asm/swift.h>
48#include <asm/turbosparc.h>
Konrad Eisele75d9e342009-08-17 00:13:33 +000049#include <asm/leon.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070050
51#include <asm/btfixup.h>
52
53enum mbus_module srmmu_modtype;
Adrian Bunk50215d62008-06-05 11:41:51 -070054static unsigned int hwbug_bitmask;
Linus Torvalds1da177e2005-04-16 15:20:36 -070055int vac_cache_size;
56int vac_line_size;
57
58extern struct resource sparc_iomap;
59
60extern unsigned long last_valid_pfn;
61
Adrian Bunk50215d62008-06-05 11:41:51 -070062static pgd_t *srmmu_swapper_pg_dir;
Linus Torvalds1da177e2005-04-16 15:20:36 -070063
64#ifdef CONFIG_SMP
65#define FLUSH_BEGIN(mm)
66#define FLUSH_END
67#else
68#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
69#define FLUSH_END }
70#endif
71
72BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
73#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)
74
75int flush_page_for_dma_global = 1;
76
77#ifdef CONFIG_SMP
78BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
79#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
80#endif
81
82char *srmmu_name;
83
84ctxd_t *srmmu_ctx_table_phys;
Adrian Bunk50215d62008-06-05 11:41:51 -070085static ctxd_t *srmmu_context_table;
Linus Torvalds1da177e2005-04-16 15:20:36 -070086
87int viking_mxcc_present;
88static DEFINE_SPINLOCK(srmmu_context_spinlock);
89
Adrian Bunk50215d62008-06-05 11:41:51 -070090static int is_hypersparc;
Linus Torvalds1da177e2005-04-16 15:20:36 -070091
Adrian Bunk50215d62008-06-05 11:41:51 -070092static int srmmu_cache_pagetables;
Linus Torvalds1da177e2005-04-16 15:20:36 -070093
94/* these will be initialized in srmmu_nocache_calcsize() */
Adrian Bunk50215d62008-06-05 11:41:51 -070095static unsigned long srmmu_nocache_size;
96static unsigned long srmmu_nocache_end;
Linus Torvalds1da177e2005-04-16 15:20:36 -070097
98/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
99#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
100
101/* The context table is a nocache user with the biggest alignment needs. */
102#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)
103
104void *srmmu_nocache_pool;
105void *srmmu_nocache_bitmap;
106static struct bit_map srmmu_nocache_map;
107
Linus Torvalds1da177e2005-04-16 15:20:36 -0700108static inline unsigned long srmmu_pgd_page(pgd_t pgd)
109{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }
110
111
112static inline int srmmu_pte_none(pte_t pte)
113{ return !(pte_val(pte) & 0xFFFFFFF); }
114
115static inline int srmmu_pte_present(pte_t pte)
116{ return ((pte_val(pte) & SRMMU_ET_MASK) == SRMMU_ET_PTE); }
117
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118static inline int srmmu_pmd_none(pmd_t pmd)
119{ return !(pmd_val(pmd) & 0xFFFFFFF); }
120
121static inline int srmmu_pmd_bad(pmd_t pmd)
122{ return (pmd_val(pmd) & SRMMU_ET_MASK) != SRMMU_ET_PTD; }
123
124static inline int srmmu_pmd_present(pmd_t pmd)
125{ return ((pmd_val(pmd) & SRMMU_ET_MASK) == SRMMU_ET_PTD); }
126
Linus Torvalds1da177e2005-04-16 15:20:36 -0700127static inline pte_t srmmu_pte_wrprotect(pte_t pte)
128{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}
129
130static inline pte_t srmmu_pte_mkclean(pte_t pte)
131{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}
132
133static inline pte_t srmmu_pte_mkold(pte_t pte)
134{ return __pte(pte_val(pte) & ~SRMMU_REF);}
135
136static inline pte_t srmmu_pte_mkwrite(pte_t pte)
137{ return __pte(pte_val(pte) | SRMMU_WRITE);}
138
139static inline pte_t srmmu_pte_mkdirty(pte_t pte)
140{ return __pte(pte_val(pte) | SRMMU_DIRTY);}
141
142static inline pte_t srmmu_pte_mkyoung(pte_t pte)
143{ return __pte(pte_val(pte) | SRMMU_REF);}
144
145/*
146 * Conversion functions: convert a page and protection to a page entry,
147 * and a page entry and page directory to the page they refer to.
148 */
149static pte_t srmmu_mk_pte(struct page *page, pgprot_t pgprot)
150{ return __pte((page_to_pfn(page) << (PAGE_SHIFT-4)) | pgprot_val(pgprot)); }
151
152static pte_t srmmu_mk_pte_phys(unsigned long page, pgprot_t pgprot)
153{ return __pte(((page) >> 4) | pgprot_val(pgprot)); }
154
155static pte_t srmmu_mk_pte_io(unsigned long page, pgprot_t pgprot, int space)
156{ return __pte(((page) >> 4) | (space << 28) | pgprot_val(pgprot)); }
157
158/* XXX should we hyper_flush_whole_icache here - Anton */
159static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
160{ srmmu_set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }
161
162static inline void srmmu_pgd_set(pgd_t * pgdp, pmd_t * pmdp)
163{ srmmu_set_pte((pte_t *)pgdp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pmdp) >> 4))); }
164
165static void srmmu_pmd_set(pmd_t *pmdp, pte_t *ptep)
166{
167 unsigned long ptp; /* Physical address, shifted right by 4 */
168 int i;
169
170 ptp = __nocache_pa((unsigned long) ptep) >> 4;
171 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
172 srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
173 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
174 }
175}
176
177static void srmmu_pmd_populate(pmd_t *pmdp, struct page *ptep)
178{
179 unsigned long ptp; /* Physical address, shifted right by 4 */
180 int i;
181
182 ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4); /* watch for overflow */
183 for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
184 srmmu_set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
185 ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
186 }
187}
188
189static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
190{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }
191
192/* to find an entry in a top-level page table... */
Adrian Bunk31156242005-10-03 17:37:02 -0700193static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700194{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }
195
196/* Find an entry in the second-level page table.. */
197static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
198{
199 return (pmd_t *) srmmu_pgd_page(*dir) +
200 ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
201}
202
203/* Find an entry in the third-level page table.. */
204static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
205{
206 void *pte;
207
208 pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
209 return (pte_t *) pte +
210 ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
211}
212
213static unsigned long srmmu_swp_type(swp_entry_t entry)
214{
215 return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
216}
217
218static unsigned long srmmu_swp_offset(swp_entry_t entry)
219{
220 return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
221}
222
223static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
224{
225 return (swp_entry_t) {
226 (type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
227 | (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
228}
229
230/*
231 * size: bytes to allocate in the nocache area.
232 * align: bytes, number to align at.
233 * Returns the virtual address of the allocated area.
234 */
235static unsigned long __srmmu_get_nocache(int size, int align)
236{
237 int offset;
238
239 if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
240 printk("Size 0x%x too small for nocache request\n", size);
241 size = SRMMU_NOCACHE_BITMAP_SHIFT;
242 }
243 if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
244 printk("Size 0x%x unaligned int nocache request\n", size);
245 size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
246 }
247 BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);
248
249 offset = bit_map_string_get(&srmmu_nocache_map,
250 size >> SRMMU_NOCACHE_BITMAP_SHIFT,
251 align >> SRMMU_NOCACHE_BITMAP_SHIFT);
252 if (offset == -1) {
253 printk("srmmu: out of nocache %d: %d/%d\n",
254 size, (int) srmmu_nocache_size,
255 srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
256 return 0;
257 }
258
259 return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
260}
261
Adrian Bunk50215d62008-06-05 11:41:51 -0700262static unsigned long srmmu_get_nocache(int size, int align)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700263{
264 unsigned long tmp;
265
266 tmp = __srmmu_get_nocache(size, align);
267
268 if (tmp)
269 memset((void *)tmp, 0, size);
270
271 return tmp;
272}
273
Adrian Bunk50215d62008-06-05 11:41:51 -0700274static void srmmu_free_nocache(unsigned long vaddr, int size)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700275{
276 int offset;
277
278 if (vaddr < SRMMU_NOCACHE_VADDR) {
279 printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
280 vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
281 BUG();
282 }
283 if (vaddr+size > srmmu_nocache_end) {
284 printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
285 vaddr, srmmu_nocache_end);
286 BUG();
287 }
Robert P. J. Day949e8272009-04-24 03:58:24 +0000288 if (!is_power_of_2(size)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700289 printk("Size 0x%x is not a power of 2\n", size);
290 BUG();
291 }
292 if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
293 printk("Size 0x%x is too small\n", size);
294 BUG();
295 }
296 if (vaddr & (size-1)) {
297 printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
298 BUG();
299 }
300
301 offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
302 size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;
303
304 bit_map_clear(&srmmu_nocache_map, offset, size);
305}
306
Adrian Bunk50215d62008-06-05 11:41:51 -0700307static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
308 unsigned long end);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700309
310extern unsigned long probe_memory(void); /* in fault.c */
311
312/*
313 * Reserve nocache dynamically proportionally to the amount of
314 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
315 */
Adrian Bunk50215d62008-06-05 11:41:51 -0700316static void srmmu_nocache_calcsize(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317{
318 unsigned long sysmemavail = probe_memory() / 1024;
319 int srmmu_nocache_npages;
320
321 srmmu_nocache_npages =
322 sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
323
324 /* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
325 // if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
326 if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
327 srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;
328
329 /* anything above 1280 blows up */
330 if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
331 srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;
332
333 srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
334 srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
335}
336
Adrian Bunk50215d62008-06-05 11:41:51 -0700337static void __init srmmu_nocache_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700338{
339 unsigned int bitmap_bits;
340 pgd_t *pgd;
341 pmd_t *pmd;
342 pte_t *pte;
343 unsigned long paddr, vaddr;
344 unsigned long pteval;
345
346 bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
347
348 srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
349 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
350 memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
351
352 srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
353 bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
354
355 srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
356 memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
357 init_mm.pgd = srmmu_swapper_pg_dir;
358
359 srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);
360
361 paddr = __pa((unsigned long)srmmu_nocache_pool);
362 vaddr = SRMMU_NOCACHE_VADDR;
363
364 while (vaddr < srmmu_nocache_end) {
365 pgd = pgd_offset_k(vaddr);
366 pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
367 pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);
368
369 pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
370
371 if (srmmu_cache_pagetables)
372 pteval |= SRMMU_CACHE;
373
374 srmmu_set_pte(__nocache_fix(pte), __pte(pteval));
375
376 vaddr += PAGE_SIZE;
377 paddr += PAGE_SIZE;
378 }
379
380 flush_cache_all();
381 flush_tlb_all();
382}
383
384static inline pgd_t *srmmu_get_pgd_fast(void)
385{
386 pgd_t *pgd = NULL;
387
388 pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
389 if (pgd) {
390 pgd_t *init = pgd_offset_k(0);
391 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
392 memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
393 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
394 }
395
396 return pgd;
397}
398
399static void srmmu_free_pgd_fast(pgd_t *pgd)
400{
401 srmmu_free_nocache((unsigned long)pgd, SRMMU_PGD_TABLE_SIZE);
402}
403
404static pmd_t *srmmu_pmd_alloc_one(struct mm_struct *mm, unsigned long address)
405{
406 return (pmd_t *)srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
407}
408
409static void srmmu_pmd_free(pmd_t * pmd)
410{
411 srmmu_free_nocache((unsigned long)pmd, SRMMU_PMD_TABLE_SIZE);
412}
413
414/*
415 * Hardware needs alignment to 256 only, but we align to whole page size
416 * to reduce fragmentation problems due to the buddy principle.
417 * XXX Provide actual fragmentation statistics in /proc.
418 *
419 * Alignments up to the page size are the same for physical and virtual
420 * addresses of the nocache area.
421 */
422static pte_t *
423srmmu_pte_alloc_one_kernel(struct mm_struct *mm, unsigned long address)
424{
425 return (pte_t *)srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
426}
427
Martin Schwidefsky2f569af2008-02-08 04:22:04 -0800428static pgtable_t
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429srmmu_pte_alloc_one(struct mm_struct *mm, unsigned long address)
430{
431 unsigned long pte;
Martin Schwidefsky2f569af2008-02-08 04:22:04 -0800432 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700433
434 if ((pte = (unsigned long)srmmu_pte_alloc_one_kernel(mm, address)) == 0)
435 return NULL;
Martin Schwidefsky2f569af2008-02-08 04:22:04 -0800436 page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
437 pgtable_page_ctor(page);
438 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700439}
440
441static void srmmu_free_pte_fast(pte_t *pte)
442{
443 srmmu_free_nocache((unsigned long)pte, PTE_SIZE);
444}
445
Martin Schwidefsky2f569af2008-02-08 04:22:04 -0800446static void srmmu_pte_free(pgtable_t pte)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700447{
448 unsigned long p;
449
Martin Schwidefsky2f569af2008-02-08 04:22:04 -0800450 pgtable_page_dtor(pte);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700451 p = (unsigned long)page_address(pte); /* Cached address (for test) */
452 if (p == 0)
453 BUG();
454 p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
455 p = (unsigned long) __nocache_va(p); /* Nocached virtual */
456 srmmu_free_nocache(p, PTE_SIZE);
457}
458
459/*
460 */
461static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
462{
463 struct ctx_list *ctxp;
464
465 ctxp = ctx_free.next;
466 if(ctxp != &ctx_free) {
467 remove_from_ctx_list(ctxp);
468 add_to_used_ctxlist(ctxp);
469 mm->context = ctxp->ctx_number;
470 ctxp->ctx_mm = mm;
471 return;
472 }
473 ctxp = ctx_used.next;
474 if(ctxp->ctx_mm == old_mm)
475 ctxp = ctxp->next;
476 if(ctxp == &ctx_used)
477 panic("out of mmu contexts");
478 flush_cache_mm(ctxp->ctx_mm);
479 flush_tlb_mm(ctxp->ctx_mm);
480 remove_from_ctx_list(ctxp);
481 add_to_used_ctxlist(ctxp);
482 ctxp->ctx_mm->context = NO_CONTEXT;
483 ctxp->ctx_mm = mm;
484 mm->context = ctxp->ctx_number;
485}
486
487static inline void free_context(int context)
488{
489 struct ctx_list *ctx_old;
490
491 ctx_old = ctx_list_pool + context;
492 remove_from_ctx_list(ctx_old);
493 add_to_free_ctxlist(ctx_old);
494}
495
496
Sam Ravnborg34d4acc2012-05-12 08:04:11 +0000497void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
498 struct task_struct *tsk)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700499{
500 if(mm->context == NO_CONTEXT) {
501 spin_lock(&srmmu_context_spinlock);
502 alloc_context(old_mm, mm);
503 spin_unlock(&srmmu_context_spinlock);
504 srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
505 }
506
Konrad Eisele75d9e342009-08-17 00:13:33 +0000507 if (sparc_cpu_model == sparc_leon)
508 leon_switch_mm();
509
Linus Torvalds1da177e2005-04-16 15:20:36 -0700510 if (is_hypersparc)
511 hyper_flush_whole_icache();
512
513 srmmu_set_context(mm->context);
514}
515
516/* Low level IO area allocation on the SRMMU. */
517static inline void srmmu_mapioaddr(unsigned long physaddr,
518 unsigned long virt_addr, int bus_type)
519{
520 pgd_t *pgdp;
521 pmd_t *pmdp;
522 pte_t *ptep;
523 unsigned long tmp;
524
525 physaddr &= PAGE_MASK;
526 pgdp = pgd_offset_k(virt_addr);
527 pmdp = srmmu_pmd_offset(pgdp, virt_addr);
528 ptep = srmmu_pte_offset(pmdp, virt_addr);
529 tmp = (physaddr >> 4) | SRMMU_ET_PTE;
530
531 /*
532 * I need to test whether this is consistent over all
533 * sun4m's. The bus_type represents the upper 4 bits of
534 * 36-bit physical address on the I/O space lines...
535 */
536 tmp |= (bus_type << 28);
537 tmp |= SRMMU_PRIV;
538 __flush_page_to_ram(virt_addr);
539 srmmu_set_pte(ptep, __pte(tmp));
540}
541
542static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
543 unsigned long xva, unsigned int len)
544{
545 while (len != 0) {
546 len -= PAGE_SIZE;
547 srmmu_mapioaddr(xpa, xva, bus);
548 xva += PAGE_SIZE;
549 xpa += PAGE_SIZE;
550 }
551 flush_tlb_all();
552}
553
554static inline void srmmu_unmapioaddr(unsigned long virt_addr)
555{
556 pgd_t *pgdp;
557 pmd_t *pmdp;
558 pte_t *ptep;
559
560 pgdp = pgd_offset_k(virt_addr);
561 pmdp = srmmu_pmd_offset(pgdp, virt_addr);
562 ptep = srmmu_pte_offset(pmdp, virt_addr);
563
564 /* No need to flush uncacheable page. */
David S. Millera46d6052012-05-12 12:26:47 -0700565 __pte_clear(ptep);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700566}
567
568static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
569{
570 while (len != 0) {
571 len -= PAGE_SIZE;
572 srmmu_unmapioaddr(virt_addr);
573 virt_addr += PAGE_SIZE;
574 }
575 flush_tlb_all();
576}
577
578/*
579 * On the SRMMU we do not have the problems with limited tlb entries
580 * for mapping kernel pages, so we just take things from the free page
581 * pool. As a side effect we are putting a little too much pressure
582 * on the gfp() subsystem. This setup also makes the logic of the
583 * iommu mapping code a lot easier as we can transparently handle
David S. Milleree906c92012-05-12 00:35:45 -0700584 * mappings on the kernel stack without any special code.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700585 */
Sam Ravnborge7b7e0c2012-05-11 11:35:16 +0000586struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700587{
588 struct thread_info *ret;
589
590 ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
591 THREAD_INFO_ORDER);
592#ifdef CONFIG_DEBUG_STACK_USAGE
593 if (ret)
594 memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
595#endif /* DEBUG_STACK_USAGE */
596
597 return ret;
598}
599
Sam Ravnborge7b7e0c2012-05-11 11:35:16 +0000600void free_thread_info(struct thread_info *ti)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700601{
602 free_pages((unsigned long)ti, THREAD_INFO_ORDER);
603}
604
605/* tsunami.S */
606extern void tsunami_flush_cache_all(void);
607extern void tsunami_flush_cache_mm(struct mm_struct *mm);
608extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
609extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
610extern void tsunami_flush_page_to_ram(unsigned long page);
611extern void tsunami_flush_page_for_dma(unsigned long page);
612extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
613extern void tsunami_flush_tlb_all(void);
614extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
615extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
616extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
617extern void tsunami_setup_blockops(void);
618
619/*
620 * Workaround, until we find what's going on with Swift. When low on memory,
621 * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
622 * out it is already in page tables/ fault again on the same instruction.
623 * I really don't understand it, have checked it and contexts
624 * are right, flush_tlb_all is done as well, and it faults again...
625 * Strange. -jj
626 *
627 * The following code is a deadwood that may be necessary when
628 * we start to make precise page flushes again. --zaitcev
629 */
Russell King4b3073e2009-12-18 16:40:18 +0000630static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700631{
632#if 0
633 static unsigned long last;
634 unsigned int val;
635 /* unsigned int n; */
636
637 if (address == last) {
638 val = srmmu_hwprobe(address);
Russell King4b3073e2009-12-18 16:40:18 +0000639 if (val != 0 && pte_val(*ptep) != val) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700640 printk("swift_update_mmu_cache: "
Joe Perchese9b57cc2012-02-28 16:08:02 -0500641 "addr %lx put %08x probed %08x from %pf\n",
Russell King4b3073e2009-12-18 16:40:18 +0000642 address, pte_val(*ptep), val,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700643 __builtin_return_address(0));
644 srmmu_flush_whole_tlb();
645 }
646 }
647 last = address;
648#endif
649}
650
651/* swift.S */
652extern void swift_flush_cache_all(void);
653extern void swift_flush_cache_mm(struct mm_struct *mm);
654extern void swift_flush_cache_range(struct vm_area_struct *vma,
655 unsigned long start, unsigned long end);
656extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
657extern void swift_flush_page_to_ram(unsigned long page);
658extern void swift_flush_page_for_dma(unsigned long page);
659extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
660extern void swift_flush_tlb_all(void);
661extern void swift_flush_tlb_mm(struct mm_struct *mm);
662extern void swift_flush_tlb_range(struct vm_area_struct *vma,
663 unsigned long start, unsigned long end);
664extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
665
666#if 0 /* P3: deadwood to debug precise flushes on Swift. */
667void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
668{
669 int cctx, ctx1;
670
671 page &= PAGE_MASK;
672 if ((ctx1 = vma->vm_mm->context) != -1) {
673 cctx = srmmu_get_context();
674/* Is context # ever different from current context? P3 */
675 if (cctx != ctx1) {
676 printk("flush ctx %02x curr %02x\n", ctx1, cctx);
677 srmmu_set_context(ctx1);
678 swift_flush_page(page);
679 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
680 "r" (page), "i" (ASI_M_FLUSH_PROBE));
681 srmmu_set_context(cctx);
682 } else {
683 /* Rm. prot. bits from virt. c. */
684 /* swift_flush_cache_all(); */
685 /* swift_flush_cache_page(vma, page); */
686 swift_flush_page(page);
687
688 __asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
689 "r" (page), "i" (ASI_M_FLUSH_PROBE));
690 /* same as above: srmmu_flush_tlb_page() */
691 }
692 }
693}
694#endif
695
696/*
697 * The following are all MBUS based SRMMU modules, and therefore could
698 * be found in a multiprocessor configuration. On the whole, these
699 * chips seems to be much more touchy about DVMA and page tables
700 * with respect to cache coherency.
701 */
702
703/* Cypress flushes. */
704static void cypress_flush_cache_all(void)
705{
706 volatile unsigned long cypress_sucks;
707 unsigned long faddr, tagval;
708
709 flush_user_windows();
710 for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
711 __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
712 "=r" (tagval) :
713 "r" (faddr), "r" (0x40000),
714 "i" (ASI_M_DATAC_TAG));
715
716 /* If modified and valid, kick it. */
717 if((tagval & 0x60) == 0x60)
718 cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
719 }
720}
721
722static void cypress_flush_cache_mm(struct mm_struct *mm)
723{
724 register unsigned long a, b, c, d, e, f, g;
725 unsigned long flags, faddr;
726 int octx;
727
728 FLUSH_BEGIN(mm)
729 flush_user_windows();
730 local_irq_save(flags);
731 octx = srmmu_get_context();
732 srmmu_set_context(mm->context);
733 a = 0x20; b = 0x40; c = 0x60;
734 d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
735
736 faddr = (0x10000 - 0x100);
737 goto inside;
738 do {
739 faddr -= 0x100;
740 inside:
741 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
742 "sta %%g0, [%0 + %2] %1\n\t"
743 "sta %%g0, [%0 + %3] %1\n\t"
744 "sta %%g0, [%0 + %4] %1\n\t"
745 "sta %%g0, [%0 + %5] %1\n\t"
746 "sta %%g0, [%0 + %6] %1\n\t"
747 "sta %%g0, [%0 + %7] %1\n\t"
748 "sta %%g0, [%0 + %8] %1\n\t" : :
749 "r" (faddr), "i" (ASI_M_FLUSH_CTX),
750 "r" (a), "r" (b), "r" (c), "r" (d),
751 "r" (e), "r" (f), "r" (g));
752 } while(faddr);
753 srmmu_set_context(octx);
754 local_irq_restore(flags);
755 FLUSH_END
756}
757
758static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
759{
760 struct mm_struct *mm = vma->vm_mm;
761 register unsigned long a, b, c, d, e, f, g;
762 unsigned long flags, faddr;
763 int octx;
764
765 FLUSH_BEGIN(mm)
766 flush_user_windows();
767 local_irq_save(flags);
768 octx = srmmu_get_context();
769 srmmu_set_context(mm->context);
770 a = 0x20; b = 0x40; c = 0x60;
771 d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
772
773 start &= SRMMU_REAL_PMD_MASK;
774 while(start < end) {
775 faddr = (start + (0x10000 - 0x100));
776 goto inside;
777 do {
778 faddr -= 0x100;
779 inside:
780 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
781 "sta %%g0, [%0 + %2] %1\n\t"
782 "sta %%g0, [%0 + %3] %1\n\t"
783 "sta %%g0, [%0 + %4] %1\n\t"
784 "sta %%g0, [%0 + %5] %1\n\t"
785 "sta %%g0, [%0 + %6] %1\n\t"
786 "sta %%g0, [%0 + %7] %1\n\t"
787 "sta %%g0, [%0 + %8] %1\n\t" : :
788 "r" (faddr),
789 "i" (ASI_M_FLUSH_SEG),
790 "r" (a), "r" (b), "r" (c), "r" (d),
791 "r" (e), "r" (f), "r" (g));
792 } while (faddr != start);
793 start += SRMMU_REAL_PMD_SIZE;
794 }
795 srmmu_set_context(octx);
796 local_irq_restore(flags);
797 FLUSH_END
798}
799
800static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
801{
802 register unsigned long a, b, c, d, e, f, g;
803 struct mm_struct *mm = vma->vm_mm;
804 unsigned long flags, line;
805 int octx;
806
807 FLUSH_BEGIN(mm)
808 flush_user_windows();
809 local_irq_save(flags);
810 octx = srmmu_get_context();
811 srmmu_set_context(mm->context);
812 a = 0x20; b = 0x40; c = 0x60;
813 d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
814
815 page &= PAGE_MASK;
816 line = (page + PAGE_SIZE) - 0x100;
817 goto inside;
818 do {
819 line -= 0x100;
820 inside:
821 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
822 "sta %%g0, [%0 + %2] %1\n\t"
823 "sta %%g0, [%0 + %3] %1\n\t"
824 "sta %%g0, [%0 + %4] %1\n\t"
825 "sta %%g0, [%0 + %5] %1\n\t"
826 "sta %%g0, [%0 + %6] %1\n\t"
827 "sta %%g0, [%0 + %7] %1\n\t"
828 "sta %%g0, [%0 + %8] %1\n\t" : :
829 "r" (line),
830 "i" (ASI_M_FLUSH_PAGE),
831 "r" (a), "r" (b), "r" (c), "r" (d),
832 "r" (e), "r" (f), "r" (g));
833 } while(line != page);
834 srmmu_set_context(octx);
835 local_irq_restore(flags);
836 FLUSH_END
837}
838
839/* Cypress is copy-back, at least that is how we configure it. */
840static void cypress_flush_page_to_ram(unsigned long page)
841{
842 register unsigned long a, b, c, d, e, f, g;
843 unsigned long line;
844
845 a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
846 page &= PAGE_MASK;
847 line = (page + PAGE_SIZE) - 0x100;
848 goto inside;
849 do {
850 line -= 0x100;
851 inside:
852 __asm__ __volatile__("sta %%g0, [%0] %1\n\t"
853 "sta %%g0, [%0 + %2] %1\n\t"
854 "sta %%g0, [%0 + %3] %1\n\t"
855 "sta %%g0, [%0 + %4] %1\n\t"
856 "sta %%g0, [%0 + %5] %1\n\t"
857 "sta %%g0, [%0 + %6] %1\n\t"
858 "sta %%g0, [%0 + %7] %1\n\t"
859 "sta %%g0, [%0 + %8] %1\n\t" : :
860 "r" (line),
861 "i" (ASI_M_FLUSH_PAGE),
862 "r" (a), "r" (b), "r" (c), "r" (d),
863 "r" (e), "r" (f), "r" (g));
864 } while(line != page);
865}
866
867/* Cypress is also IO cache coherent. */
868static void cypress_flush_page_for_dma(unsigned long page)
869{
870}
871
872/* Cypress has unified L2 VIPT, from which both instructions and data
873 * are stored. It does not have an onboard icache of any sort, therefore
874 * no flush is necessary.
875 */
876static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
877{
878}
879
880static void cypress_flush_tlb_all(void)
881{
882 srmmu_flush_whole_tlb();
883}
884
885static void cypress_flush_tlb_mm(struct mm_struct *mm)
886{
887 FLUSH_BEGIN(mm)
888 __asm__ __volatile__(
889 "lda [%0] %3, %%g5\n\t"
890 "sta %2, [%0] %3\n\t"
891 "sta %%g0, [%1] %4\n\t"
892 "sta %%g5, [%0] %3\n"
893 : /* no outputs */
894 : "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
895 "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
896 : "g5");
897 FLUSH_END
898}
899
900static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
901{
902 struct mm_struct *mm = vma->vm_mm;
903 unsigned long size;
904
905 FLUSH_BEGIN(mm)
906 start &= SRMMU_PGDIR_MASK;
907 size = SRMMU_PGDIR_ALIGN(end) - start;
908 __asm__ __volatile__(
909 "lda [%0] %5, %%g5\n\t"
910 "sta %1, [%0] %5\n"
911 "1:\n\t"
912 "subcc %3, %4, %3\n\t"
913 "bne 1b\n\t"
914 " sta %%g0, [%2 + %3] %6\n\t"
915 "sta %%g5, [%0] %5\n"
916 : /* no outputs */
917 : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
918 "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
919 "i" (ASI_M_FLUSH_PROBE)
920 : "g5", "cc");
921 FLUSH_END
922}
923
924static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
925{
926 struct mm_struct *mm = vma->vm_mm;
927
928 FLUSH_BEGIN(mm)
929 __asm__ __volatile__(
930 "lda [%0] %3, %%g5\n\t"
931 "sta %1, [%0] %3\n\t"
932 "sta %%g0, [%2] %4\n\t"
933 "sta %%g5, [%0] %3\n"
934 : /* no outputs */
935 : "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
936 "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
937 : "g5");
938 FLUSH_END
939}
940
941/* viking.S */
942extern void viking_flush_cache_all(void);
943extern void viking_flush_cache_mm(struct mm_struct *mm);
944extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
945 unsigned long end);
946extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
947extern void viking_flush_page_to_ram(unsigned long page);
948extern void viking_flush_page_for_dma(unsigned long page);
949extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
950extern void viking_flush_page(unsigned long page);
951extern void viking_mxcc_flush_page(unsigned long page);
952extern void viking_flush_tlb_all(void);
953extern void viking_flush_tlb_mm(struct mm_struct *mm);
954extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
955 unsigned long end);
956extern void viking_flush_tlb_page(struct vm_area_struct *vma,
957 unsigned long page);
958extern void sun4dsmp_flush_tlb_all(void);
959extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
960extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
961 unsigned long end);
962extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
963 unsigned long page);
964
965/* hypersparc.S */
966extern void hypersparc_flush_cache_all(void);
967extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
968extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
969extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
970extern void hypersparc_flush_page_to_ram(unsigned long page);
971extern void hypersparc_flush_page_for_dma(unsigned long page);
972extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
973extern void hypersparc_flush_tlb_all(void);
974extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
975extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
976extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
977extern void hypersparc_setup_blockops(void);
978
979/*
980 * NOTE: All of this startup code assumes the low 16mb (approx.) of
981 * kernel mappings are done with one single contiguous chunk of
982 * ram. On small ram machines (classics mainly) we only get
983 * around 8mb mapped for us.
984 */
985
Adrian Bunk50215d62008-06-05 11:41:51 -0700986static void __init early_pgtable_allocfail(char *type)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700987{
988 prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
989 prom_halt();
990}
991
Adrian Bunk50215d62008-06-05 11:41:51 -0700992static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
993 unsigned long end)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700994{
995 pgd_t *pgdp;
996 pmd_t *pmdp;
997 pte_t *ptep;
998
999 while(start < end) {
1000 pgdp = pgd_offset_k(start);
David S. Miller7d9fa4a2012-05-12 13:13:16 -07001001 if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001002 pmdp = (pmd_t *) __srmmu_get_nocache(
1003 SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
1004 if (pmdp == NULL)
1005 early_pgtable_allocfail("pmd");
1006 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
1007 srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
1008 }
1009 pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
1010 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
1011 ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
1012 if (ptep == NULL)
1013 early_pgtable_allocfail("pte");
1014 memset(__nocache_fix(ptep), 0, PTE_SIZE);
1015 srmmu_pmd_set(__nocache_fix(pmdp), ptep);
1016 }
1017 if (start > (0xffffffffUL - PMD_SIZE))
1018 break;
1019 start = (start + PMD_SIZE) & PMD_MASK;
1020 }
1021}
1022
Adrian Bunk50215d62008-06-05 11:41:51 -07001023static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
1024 unsigned long end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001025{
1026 pgd_t *pgdp;
1027 pmd_t *pmdp;
1028 pte_t *ptep;
1029
1030 while(start < end) {
1031 pgdp = pgd_offset_k(start);
David S. Miller7d9fa4a2012-05-12 13:13:16 -07001032 if (pgd_none(*pgdp)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001033 pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
1034 if (pmdp == NULL)
1035 early_pgtable_allocfail("pmd");
1036 memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
1037 srmmu_pgd_set(pgdp, pmdp);
1038 }
1039 pmdp = srmmu_pmd_offset(pgdp, start);
1040 if(srmmu_pmd_none(*pmdp)) {
1041 ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
1042 PTE_SIZE);
1043 if (ptep == NULL)
1044 early_pgtable_allocfail("pte");
1045 memset(ptep, 0, PTE_SIZE);
1046 srmmu_pmd_set(pmdp, ptep);
1047 }
1048 if (start > (0xffffffffUL - PMD_SIZE))
1049 break;
1050 start = (start + PMD_SIZE) & PMD_MASK;
1051 }
1052}
1053
1054/*
1055 * This is much cleaner than poking around physical address space
1056 * looking at the prom's page table directly which is what most
1057 * other OS's do. Yuck... this is much better.
1058 */
Adrian Bunk50215d62008-06-05 11:41:51 -07001059static void __init srmmu_inherit_prom_mappings(unsigned long start,
1060 unsigned long end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001061{
1062 pgd_t *pgdp;
1063 pmd_t *pmdp;
1064 pte_t *ptep;
1065 int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
1066 unsigned long prompte;
1067
1068 while(start <= end) {
1069 if (start == 0)
1070 break; /* probably wrap around */
1071 if(start == 0xfef00000)
1072 start = KADB_DEBUGGER_BEGVM;
1073 if(!(prompte = srmmu_hwprobe(start))) {
1074 start += PAGE_SIZE;
1075 continue;
1076 }
1077
1078 /* A red snapper, see what it really is. */
1079 what = 0;
1080
1081 if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
1082 if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
1083 what = 1;
1084 }
1085
1086 if(!(start & ~(SRMMU_PGDIR_MASK))) {
1087 if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
1088 prompte)
1089 what = 2;
1090 }
1091
1092 pgdp = pgd_offset_k(start);
1093 if(what == 2) {
1094 *(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
1095 start += SRMMU_PGDIR_SIZE;
1096 continue;
1097 }
David S. Miller7d9fa4a2012-05-12 13:13:16 -07001098 if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001099 pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
1100 if (pmdp == NULL)
1101 early_pgtable_allocfail("pmd");
1102 memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
1103 srmmu_pgd_set(__nocache_fix(pgdp), pmdp);
1104 }
1105 pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
1106 if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
1107 ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
1108 PTE_SIZE);
1109 if (ptep == NULL)
1110 early_pgtable_allocfail("pte");
1111 memset(__nocache_fix(ptep), 0, PTE_SIZE);
1112 srmmu_pmd_set(__nocache_fix(pmdp), ptep);
1113 }
1114 if(what == 1) {
1115 /*
1116 * We bend the rule where all 16 PTPs in a pmd_t point
1117 * inside the same PTE page, and we leak a perfectly
1118 * good hardware PTE piece. Alternatives seem worse.
1119 */
1120 unsigned int x; /* Index of HW PMD in soft cluster */
1121 x = (start >> PMD_SHIFT) & 15;
1122 *(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
1123 start += SRMMU_REAL_PMD_SIZE;
1124 continue;
1125 }
1126 ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
1127 *(pte_t *)__nocache_fix(ptep) = __pte(prompte);
1128 start += PAGE_SIZE;
1129 }
1130}
1131
1132#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)
1133
1134/* Create a third-level SRMMU 16MB page mapping. */
1135static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
1136{
1137 pgd_t *pgdp = pgd_offset_k(vaddr);
1138 unsigned long big_pte;
1139
1140 big_pte = KERNEL_PTE(phys_base >> 4);
1141 *(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
1142}
1143
1144/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
1145static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
1146{
1147 unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
1148 unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
1149 unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
1150 /* Map "low" memory only */
1151 const unsigned long min_vaddr = PAGE_OFFSET;
1152 const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;
1153
1154 if (vstart < min_vaddr || vstart >= max_vaddr)
1155 return vstart;
1156
1157 if (vend > max_vaddr || vend < min_vaddr)
1158 vend = max_vaddr;
1159
1160 while(vstart < vend) {
1161 do_large_mapping(vstart, pstart);
1162 vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
1163 }
1164 return vstart;
1165}
1166
1167static inline void memprobe_error(char *msg)
1168{
1169 prom_printf(msg);
1170 prom_printf("Halting now...\n");
1171 prom_halt();
1172}
1173
1174static inline void map_kernel(void)
1175{
1176 int i;
1177
1178 if (phys_base > 0) {
1179 do_large_mapping(PAGE_OFFSET, phys_base);
1180 }
1181
1182 for (i = 0; sp_banks[i].num_bytes != 0; i++) {
1183 map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
1184 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001185}
1186
1187/* Paging initialization on the Sparc Reference MMU. */
1188extern void sparc_context_init(int);
1189
Al Viro409832f2008-11-22 17:33:54 +00001190void (*poke_srmmu)(void) __cpuinitdata = NULL;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001191
1192extern unsigned long bootmem_init(unsigned long *pages_avail);
1193
1194void __init srmmu_paging_init(void)
1195{
Andres Salomon8d125562010-10-08 14:18:11 -07001196 int i;
1197 phandle cpunode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001198 char node_str[128];
1199 pgd_t *pgd;
1200 pmd_t *pmd;
1201 pte_t *pte;
1202 unsigned long pages_avail;
1203
1204 sparc_iomap.start = SUN4M_IOBASE_VADDR; /* 16MB of IOSPACE on all sun4m's. */
1205
1206 if (sparc_cpu_model == sun4d)
1207 num_contexts = 65536; /* We know it is Viking */
1208 else {
1209 /* Find the number of contexts on the srmmu. */
1210 cpunode = prom_getchild(prom_root_node);
1211 num_contexts = 0;
1212 while(cpunode != 0) {
1213 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1214 if(!strcmp(node_str, "cpu")) {
1215 num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
1216 break;
1217 }
1218 cpunode = prom_getsibling(cpunode);
1219 }
1220 }
1221
1222 if(!num_contexts) {
1223 prom_printf("Something wrong, can't find cpu node in paging_init.\n");
1224 prom_halt();
1225 }
1226
1227 pages_avail = 0;
1228 last_valid_pfn = bootmem_init(&pages_avail);
1229
1230 srmmu_nocache_calcsize();
1231 srmmu_nocache_init();
1232 srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
1233 map_kernel();
1234
1235 /* ctx table has to be physically aligned to its size */
1236 srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
1237 srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);
1238
1239 for(i = 0; i < num_contexts; i++)
1240 srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);
1241
1242 flush_cache_all();
1243 srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
Bob Breuera54123e2006-03-23 22:36:19 -08001244#ifdef CONFIG_SMP
1245 /* Stop from hanging here... */
1246 local_flush_tlb_all();
1247#else
Linus Torvalds1da177e2005-04-16 15:20:36 -07001248 flush_tlb_all();
Bob Breuera54123e2006-03-23 22:36:19 -08001249#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001250 poke_srmmu();
1251
Linus Torvalds1da177e2005-04-16 15:20:36 -07001252 srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
1253 srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001254
1255 srmmu_allocate_ptable_skeleton(
1256 __fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
1257 srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);
1258
1259 pgd = pgd_offset_k(PKMAP_BASE);
1260 pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
1261 pte = srmmu_pte_offset(pmd, PKMAP_BASE);
1262 pkmap_page_table = pte;
1263
1264 flush_cache_all();
1265 flush_tlb_all();
1266
1267 sparc_context_init(num_contexts);
1268
1269 kmap_init();
1270
1271 {
1272 unsigned long zones_size[MAX_NR_ZONES];
1273 unsigned long zholes_size[MAX_NR_ZONES];
1274 unsigned long npages;
1275 int znum;
1276
1277 for (znum = 0; znum < MAX_NR_ZONES; znum++)
1278 zones_size[znum] = zholes_size[znum] = 0;
1279
1280 npages = max_low_pfn - pfn_base;
1281
1282 zones_size[ZONE_DMA] = npages;
1283 zholes_size[ZONE_DMA] = npages - pages_avail;
1284
1285 npages = highend_pfn - max_low_pfn;
1286 zones_size[ZONE_HIGHMEM] = npages;
1287 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
1288
Johannes Weiner9109fb72008-07-23 21:27:20 -07001289 free_area_init_node(0, zones_size, pfn_base, zholes_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001290 }
1291}
1292
1293static void srmmu_mmu_info(struct seq_file *m)
1294{
1295 seq_printf(m,
1296 "MMU type\t: %s\n"
1297 "contexts\t: %d\n"
1298 "nocache total\t: %ld\n"
1299 "nocache used\t: %d\n",
1300 srmmu_name,
1301 num_contexts,
1302 srmmu_nocache_size,
1303 srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
1304}
1305
1306static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
1307{
1308}
1309
1310static void srmmu_destroy_context(struct mm_struct *mm)
1311{
1312
1313 if(mm->context != NO_CONTEXT) {
1314 flush_cache_mm(mm);
1315 srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
1316 flush_tlb_mm(mm);
1317 spin_lock(&srmmu_context_spinlock);
1318 free_context(mm->context);
1319 spin_unlock(&srmmu_context_spinlock);
1320 mm->context = NO_CONTEXT;
1321 }
1322}
1323
1324/* Init various srmmu chip types. */
1325static void __init srmmu_is_bad(void)
1326{
1327 prom_printf("Could not determine SRMMU chip type.\n");
1328 prom_halt();
1329}
1330
1331static void __init init_vac_layout(void)
1332{
Andres Salomon8d125562010-10-08 14:18:11 -07001333 phandle nd;
1334 int cache_lines;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001335 char node_str[128];
1336#ifdef CONFIG_SMP
1337 int cpu = 0;
1338 unsigned long max_size = 0;
1339 unsigned long min_line_size = 0x10000000;
1340#endif
1341
1342 nd = prom_getchild(prom_root_node);
1343 while((nd = prom_getsibling(nd)) != 0) {
1344 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
1345 if(!strcmp(node_str, "cpu")) {
1346 vac_line_size = prom_getint(nd, "cache-line-size");
1347 if (vac_line_size == -1) {
1348 prom_printf("can't determine cache-line-size, "
1349 "halting.\n");
1350 prom_halt();
1351 }
1352 cache_lines = prom_getint(nd, "cache-nlines");
1353 if (cache_lines == -1) {
1354 prom_printf("can't determine cache-nlines, halting.\n");
1355 prom_halt();
1356 }
1357
1358 vac_cache_size = cache_lines * vac_line_size;
1359#ifdef CONFIG_SMP
1360 if(vac_cache_size > max_size)
1361 max_size = vac_cache_size;
1362 if(vac_line_size < min_line_size)
1363 min_line_size = vac_line_size;
Bob Breuera54123e2006-03-23 22:36:19 -08001364 //FIXME: cpus not contiguous!!
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365 cpu++;
Rusty Russellec7c14b2009-03-16 14:40:24 +10301366 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 break;
1368#else
1369 break;
1370#endif
1371 }
1372 }
1373 if(nd == 0) {
1374 prom_printf("No CPU nodes found, halting.\n");
1375 prom_halt();
1376 }
1377#ifdef CONFIG_SMP
1378 vac_cache_size = max_size;
1379 vac_line_size = min_line_size;
1380#endif
1381 printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
1382 (int)vac_cache_size, (int)vac_line_size);
1383}
1384
Al Viro409832f2008-11-22 17:33:54 +00001385static void __cpuinit poke_hypersparc(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001386{
1387 volatile unsigned long clear;
1388 unsigned long mreg = srmmu_get_mmureg();
1389
1390 hyper_flush_unconditional_combined();
1391
1392 mreg &= ~(HYPERSPARC_CWENABLE);
1393 mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
1394 mreg |= (HYPERSPARC_CMODE);
1395
1396 srmmu_set_mmureg(mreg);
1397
1398#if 0 /* XXX I think this is bad news... -DaveM */
1399 hyper_clear_all_tags();
1400#endif
1401
1402 put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
1403 hyper_flush_whole_icache();
1404 clear = srmmu_get_faddr();
1405 clear = srmmu_get_fstatus();
1406}
1407
1408static void __init init_hypersparc(void)
1409{
1410 srmmu_name = "ROSS HyperSparc";
1411 srmmu_modtype = HyperSparc;
1412
1413 init_vac_layout();
1414
1415 is_hypersparc = 1;
1416
Linus Torvalds1da177e2005-04-16 15:20:36 -07001417 BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
1418 BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
1419 BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
1420 BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
1421
1422 BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
1423 BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
1424 BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
1425 BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
1426
1427 BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
1428 BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
1429 BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
1430
1431
1432 poke_srmmu = poke_hypersparc;
1433
1434 hypersparc_setup_blockops();
1435}
1436
Al Viro409832f2008-11-22 17:33:54 +00001437static void __cpuinit poke_cypress(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438{
1439 unsigned long mreg = srmmu_get_mmureg();
1440 unsigned long faddr, tagval;
1441 volatile unsigned long cypress_sucks;
1442 volatile unsigned long clear;
1443
1444 clear = srmmu_get_faddr();
1445 clear = srmmu_get_fstatus();
1446
1447 if (!(mreg & CYPRESS_CENABLE)) {
1448 for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
1449 __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
1450 "sta %%g0, [%0] %2\n\t" : :
1451 "r" (faddr), "r" (0x40000),
1452 "i" (ASI_M_DATAC_TAG));
1453 }
1454 } else {
1455 for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
1456 __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
1457 "=r" (tagval) :
1458 "r" (faddr), "r" (0x40000),
1459 "i" (ASI_M_DATAC_TAG));
1460
1461 /* If modified and valid, kick it. */
1462 if((tagval & 0x60) == 0x60)
1463 cypress_sucks = *(unsigned long *)
1464 (0xf0020000 + faddr);
1465 }
1466 }
1467
1468 /* And one more, for our good neighbor, Mr. Broken Cypress. */
1469 clear = srmmu_get_faddr();
1470 clear = srmmu_get_fstatus();
1471
1472 mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
1473 srmmu_set_mmureg(mreg);
1474}
1475
1476static void __init init_cypress_common(void)
1477{
1478 init_vac_layout();
1479
Linus Torvalds1da177e2005-04-16 15:20:36 -07001480 BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
1481 BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
1482 BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
1483 BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
1484
1485 BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
1486 BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
1487 BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
1488 BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
1489
1490
1491 BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
1492 BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
1493 BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
1494
1495 poke_srmmu = poke_cypress;
1496}
1497
1498static void __init init_cypress_604(void)
1499{
1500 srmmu_name = "ROSS Cypress-604(UP)";
1501 srmmu_modtype = Cypress;
1502 init_cypress_common();
1503}
1504
1505static void __init init_cypress_605(unsigned long mrev)
1506{
1507 srmmu_name = "ROSS Cypress-605(MP)";
1508 if(mrev == 0xe) {
1509 srmmu_modtype = Cypress_vE;
1510 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
1511 } else {
1512 if(mrev == 0xd) {
1513 srmmu_modtype = Cypress_vD;
1514 hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
1515 } else {
1516 srmmu_modtype = Cypress;
1517 }
1518 }
1519 init_cypress_common();
1520}
1521
Al Viro409832f2008-11-22 17:33:54 +00001522static void __cpuinit poke_swift(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523{
1524 unsigned long mreg;
1525
1526 /* Clear any crap from the cache or else... */
1527 swift_flush_cache_all();
1528
1529 /* Enable I & D caches */
1530 mreg = srmmu_get_mmureg();
1531 mreg |= (SWIFT_IE | SWIFT_DE);
1532 /*
1533 * The Swift branch folding logic is completely broken. At
1534 * trap time, if things are just right, if can mistakenly
1535 * think that a trap is coming from kernel mode when in fact
1536 * it is coming from user mode (it mis-executes the branch in
1537 * the trap code). So you see things like crashme completely
1538 * hosing your machine which is completely unacceptable. Turn
1539 * this shit off... nice job Fujitsu.
1540 */
1541 mreg &= ~(SWIFT_BF);
1542 srmmu_set_mmureg(mreg);
1543}

#define SWIFT_MASKID_ADDR 0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush-hush about
		 * this hardware bug... really braindamaged stuff going
		 * on here. However I think we can find a way to avoid
		 * all of the workaround overhead under Linux. Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes). Aha, sounds pretty
		 * horrible eh? But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all. This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts".
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);


	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time? Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people. I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

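/*
 * TurboSparc cache flushing.  No fine-grained flush primitive is used
 * here; the mm/range/page variants below dump the user register windows
 * and then clear the entire on-chip instruction and data caches.
 */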
static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back if we turn that mode on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}


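/*
 * Installed as poke_srmmu by init_turbosparc(): flush the caches,
 * temporarily disable them, program the cache control register
 * (DVMA snooping, write policy, secondary cache enable), then turn
 * the I- and D-caches back on with I-cache snooping enabled.
 */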
static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, write-through D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_turbosparc;
}

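/*
 * Installed as poke_srmmu by init_tsunami(): flush both on-chip caches,
 * then enable the I- and D-caches in the MMU control register.
 */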
static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time. Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;

	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);


	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

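/*
 * Installed as poke_srmmu by init_viking().  With an MXCC present the
 * external cache is enabled and page-table walks are cached; without
 * one, table-walk caching stays off and mixed-command mode is disabled
 * on the CPUs after the first.  Finally the store buffer, I/D caches
 * and bus snooping are switched on.
 */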
static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

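/*
 * The MMODE bit in the MMU control register tells a plain Viking apart
 * from one sitting behind an MXCC external cache controller; the two
 * need different DMA flushing and page-table caching setups.
 */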
static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking. SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		/*
		 * We need this to make sure old viking takes no hits
		 * on its cache for dma snoops, to work around the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);

		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;

		srmmu_cache_pagetables = 1;

		/* MXCC vikings lack the DMA snooping bug. */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
	}

	BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);

#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
	} else
#endif
	{
		BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
	}

	BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);

	poke_srmmu = poke_viking;
}

#ifdef CONFIG_SPARC_LEON

void __init poke_leonsparc(void)
{
}

void __init init_leon(void)
{

	srmmu_name = "LEON";

	BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
			BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
			BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);

	poke_srmmu = poke_leonsparc;

	srmmu_cache_pagetables = 0;

	leon_flush_during_switch = leon_flush_needed();
}
#endif

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

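	/*
	 * The IMPL and VERS fields of the SRMMU control register and of
	 * the CPU's PSR identify the module; the decision tree below keys
	 * off these four values.
	 */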
	mreg = srmmu_get_mmureg(); psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
			/* Uniprocessor Cypress */
			init_cypress_604();
			break;
		case 10:
		case 11:
		case 12:
			/* _REALLY OLD_ Cypress MP chips... */
		case 13:
		case 14:
		case 15:
			/* MP Cypress mmu/cache-controller */
			init_cypress_605(mod_rev);
			break;
		default:
			/* Some other Cypress revision, assume a 605. */
			init_cypress_605(mod_rev);
			break;
		}
		return;
	}

	/*
	 * Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Check whether this is really a TurboSparc emulating a Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	   ((psr_vers == 0) ||
	    ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
	tsetup_mmu_patchme, rtrap_mmu_patchme;

extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
	tsetup_srmmu_stackchk, srmmu_rett_stackchk;

#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
	local_flush_page_for_dma(page);
}

#endif

/* Load up routines and constants for sun4m and sun4d mmu */
void __init ld_mmu_srmmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);
	extern void ___xchg32_sun4md(void);

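	/*
	 * Everything below patches the generic btfixup call sites with the
	 * SRMMU implementations; the chip-specific cache/TLB hooks are
	 * filled in afterwards by get_srmmu_type().
	 */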
	/* Functions */
#ifndef CONFIG_SMP
	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif

	BTFIXUPSET_CALL(set_pte, srmmu_set_pte, BTFIXUPCALL_SWAPO0O1);

	BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pte_present, srmmu_pte_present, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(pmd_bad, srmmu_pmd_bad, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_present, srmmu_pmd_present, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mk_pte, srmmu_mk_pte, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_phys, srmmu_mk_pte_phys, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(mk_pte_io, srmmu_mk_pte_io, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pgd_set, srmmu_pgd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_set, srmmu_pmd_set, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_populate, srmmu_pmd_populate, BTFIXUPCALL_NORM);

	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(free_pte_fast, srmmu_free_pte_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_free, srmmu_pte_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one_kernel, srmmu_pte_alloc_one_kernel, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_alloc_one, srmmu_pte_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pmd_fast, srmmu_pmd_free, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pmd_alloc_one, srmmu_pmd_alloc_one, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(free_pgd_fast, srmmu_free_pgd_fast, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(get_pgd_fast, srmmu_get_pgd_fast, BTFIXUPCALL_NORM);

	BTFIXUPSET_HALF(pte_writei, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_dirtyi, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_youngi, SRMMU_REF);
	BTFIXUPSET_HALF(pte_filei, SRMMU_FILE);
	BTFIXUPSET_HALF(pte_wrprotecti, SRMMU_WRITE);
	BTFIXUPSET_HALF(pte_mkcleani, SRMMU_DIRTY);
	BTFIXUPSET_HALF(pte_mkoldi, SRMMU_REF);
	BTFIXUPSET_CALL(pte_mkwrite, srmmu_pte_mkwrite, BTFIXUPCALL_ORINT(SRMMU_WRITE));
	BTFIXUPSET_CALL(pte_mkdirty, srmmu_pte_mkdirty, BTFIXUPCALL_ORINT(SRMMU_DIRTY));
	BTFIXUPSET_CALL(pte_mkyoung, srmmu_pte_mkyoung, BTFIXUPCALL_ORINT(SRMMU_REF));
	BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);

	get_srmmu_type();

#ifdef CONFIG_SMP
	/* El switcheroo... */

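	/*
	 * Save the per-CPU flush routines chosen above as the local_*
	 * variants, then point the public entry points at the smp_*
	 * wrappers which cross-call the local versions on all CPUs.
	 */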
	BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
	BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
	BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
	BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
	BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
	BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
	BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
	BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
	BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
	BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
	BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);

	BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
	if (sparc_cpu_model != sun4d &&
	    sparc_cpu_model != sparc_leon) {
		BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);

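	/*
	 * Viking caches snoop the bus and stay coherent on their own, so
	 * cache flushes can be kept local rather than broadcast with
	 * cross-calls.
	 */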
	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
		BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
		BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
		BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
		BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
		BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
		BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
	}
#endif

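	/* sun4d machines use the IO-unit for DVMA, everything else the IOMMU. */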
	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
}