/*
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/bitext.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>
#include <asm/leon.h>

#include <asm/btfixup.h>

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

#ifdef CONFIG_SMP
#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif
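/* On UP builds FLUSH_BEGIN/FLUSH_END skip the bracketed flush entirely for
 * address spaces that never received an MMU context; on SMP the bracket is
 * empty and the flush is always performed.
 */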

BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)

int flush_page_for_dma_global = 1;

#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
#endif

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
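/* With 4 KB pages (PAGE_SHIFT == 12) the shift works out to 8, so each bitmap
 * bit covers 1 << 8 = 256 bytes of nocache, i.e. 64 four-byte PTEs.
 */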

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

static inline pte_t srmmu_pte_wrprotect(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkclean(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkold(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_REF);}

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}
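/* Note that one Linux pmd entry fans out to PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE
 * hardware page-table pointers, so a single software PTE page backs several
 * hardware tables; srmmu_inherit_prom_mappings() below deliberately bends this
 * invariant for PROM-inherited pmd-level mappings.
 */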

void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }

/* to find an entry in a top-level page table... */
static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
		printk("Size 0x%x unaligned int nocache request\n", size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
		    size, (int) srmmu_nocache_size,
		    srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}

unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}

void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr+size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		    vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size-1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}
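/* Callers pair srmmu_get_nocache(size, align) with srmmu_free_nocache(vaddr,
 * size) using a power-of-two size, as the sanity checks above require; a zero
 * return from the allocator means the nocache pool is exhausted.
 */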

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}
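/* Worked example (assuming SRMMU_NOCACHE_ALCRATIO is 64): with 64 MB of RAM,
 * sysmemavail = 65536 KB and srmmu_nocache_npages = 65536 / 64 / 1024 * 256
 * = 256 pages, i.e. 1 MB of nocache with 4 KB pages, before the MIN/MAX
 * clamping above is applied.
 */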
278
Adrian Bunk50215d62008-06-05 11:41:51 -0700279static void __init srmmu_nocache_init(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700280{
281 unsigned int bitmap_bits;
282 pgd_t *pgd;
283 pmd_t *pmd;
284 pte_t *pte;
285 unsigned long paddr, vaddr;
286 unsigned long pteval;
287
288 bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;
289
290 srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
291 SRMMU_NOCACHE_ALIGN_MAX, 0UL);
292 memset(srmmu_nocache_pool, 0, srmmu_nocache_size);
293
294 srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
295 bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);
296
297 srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
298 memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
299 init_mm.pgd = srmmu_swapper_pg_dir;
300
301 srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);
302
303 paddr = __pa((unsigned long)srmmu_nocache_pool);
304 vaddr = SRMMU_NOCACHE_VADDR;
305
306 while (vaddr < srmmu_nocache_end) {
307 pgd = pgd_offset_k(vaddr);
Sam Ravnborg9701b262012-05-13 10:21:25 +0200308 pmd = pmd_offset(__nocache_fix(pgd), vaddr);
309 pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700310
311 pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);
312
313 if (srmmu_cache_pagetables)
314 pteval |= SRMMU_CACHE;
315
David S. Miller62875cf2012-05-12 13:39:23 -0700316 set_pte(__nocache_fix(pte), __pte(pteval));
Linus Torvalds1da177e2005-04-16 15:20:36 -0700317
318 vaddr += PAGE_SIZE;
319 paddr += PAGE_SIZE;
320 }
321
322 flush_cache_all();
323 flush_tlb_all();
324}
325
Sam Ravnborg642ea3e2012-05-13 08:40:27 +0200326pgd_t *get_pgd_fast(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700327{
328 pgd_t *pgd = NULL;
329
330 pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
331 if (pgd) {
332 pgd_t *init = pgd_offset_k(0);
333 memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
334 memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
335 (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
336 }
337
338 return pgd;
339}
340
Linus Torvalds1da177e2005-04-16 15:20:36 -0700341/*
342 * Hardware needs alignment to 256 only, but we align to whole page size
343 * to reduce fragmentation problems due to the buddy principle.
344 * XXX Provide actual fragmentation statistics in /proc.
345 *
346 * Alignments up to the page size are the same for physical and virtual
347 * addresses of the nocache area.
348 */
Sam Ravnborg642ea3e2012-05-13 08:40:27 +0200349pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700350{
351 unsigned long pte;
Martin Schwidefsky2f569af2008-02-08 04:22:04 -0800352 struct page *page;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700353
Sam Ravnborg642ea3e2012-05-13 08:40:27 +0200354 if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700355 return NULL;
Martin Schwidefsky2f569af2008-02-08 04:22:04 -0800356 page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
357 pgtable_page_ctor(page);
358 return page;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700359}
360
Sam Ravnborg642ea3e2012-05-13 08:40:27 +0200361void pte_free(struct mm_struct *mm, pgtable_t pte)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700362{
363 unsigned long p;
364
Martin Schwidefsky2f569af2008-02-08 04:22:04 -0800365 pgtable_page_dtor(pte);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700366 p = (unsigned long)page_address(pte); /* Cached address (for test) */
367 if (p == 0)
368 BUG();
369 p = page_to_pfn(pte) << PAGE_SHIFT; /* Physical address */
370 p = (unsigned long) __nocache_va(p); /* Nocached virtual */
371 srmmu_free_nocache(p, PTE_SIZE);
372}
373
374/*
375 */
376static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
377{
378 struct ctx_list *ctxp;
379
380 ctxp = ctx_free.next;
381 if(ctxp != &ctx_free) {
382 remove_from_ctx_list(ctxp);
383 add_to_used_ctxlist(ctxp);
384 mm->context = ctxp->ctx_number;
385 ctxp->ctx_mm = mm;
386 return;
387 }
388 ctxp = ctx_used.next;
389 if(ctxp->ctx_mm == old_mm)
390 ctxp = ctxp->next;
391 if(ctxp == &ctx_used)
392 panic("out of mmu contexts");
393 flush_cache_mm(ctxp->ctx_mm);
394 flush_tlb_mm(ctxp->ctx_mm);
395 remove_from_ctx_list(ctxp);
396 add_to_used_ctxlist(ctxp);
397 ctxp->ctx_mm->context = NO_CONTEXT;
398 ctxp->ctx_mm = mm;
399 mm->context = ctxp->ctx_number;
400}
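/* When the free list is empty, alloc_context() above steals a used context
 * that does not belong to old_mm: the victim mm is cache/TLB flushed and
 * marked NO_CONTEXT so it re-allocates a context on its next switch_mm().
 */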

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}


void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if(mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/*
	 * I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}
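/* The PTE stores the 36-bit physical address shifted right by 4: (physaddr
 * >> 4) supplies the low 32 address bits while (bus_type << 28) supplies
 * bits 35:32 driven on the I/O space lines.
 */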

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/*
 * On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool.  As a side effect we are putting a little too much pressure
 * on the gfp() subsystem.  This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code.
 */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ret;

	ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
						     THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
	if (ret)
		memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */

	return ret;
}

void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/*
 * Workaround, until we find what's going on with Swift. When low on memory,
 * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
 * out it is already in page tables/ fault again on the same instruction.
 * I really don't understand it, have checked it and contexts
 * are right, flush_tlb_all is done as well, and it faults again...
 * Strange. -jj
 *
 * The following code is a deadwood that may be necessary when
 * we start to make precise page flushes again. --zaitcev
 */
static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
{
#if 0
	static unsigned long last;
	unsigned int val;
	/* unsigned int n; */

	if (address == last) {
		val = srmmu_hwprobe(address);
		if (val != 0 && pte_val(*ptep) != val) {
			printk("swift_update_mmu_cache: "
			    "addr %lx put %08x probed %08x from %pf\n",
			    address, pte_val(*ptep), val,
			    __builtin_return_address(0));
			srmmu_flush_whole_tlb();
		}
	}
	last = address;
#endif
}

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			 /* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* Cypress flushes. */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}

static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	start &= SRMMU_REAL_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_REAL_PMD_SIZE;
	}
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

/* Cypress is copy-back, at least that is how we configure it. */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/* Cypress has unified L2 VIPT, from which both instructions and data
 * are stored.  It does not have an onboard icache of any sort, therefore
 * no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%2, [%0] %3\n\t"
	"sta	%%g0, [%1] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__(
		"lda	[%0] %5, %%g5\n\t"
		"sta	%1, [%0] %5\n"
		"1:\n\t"
		"subcc	%3, %4, %3\n\t"
		"bne	1b\n\t"
		" sta	%%g0, [%2 + %3] %6\n\t"
		"sta	%%g5, [%0] %5\n"
		: /* no outputs */
		: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
		  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
		  "i" (ASI_M_FLUSH_PROBE)
		: "g5", "cc");
	FLUSH_END
}

static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda	[%0] %3, %%g5\n\t"
	"sta	%1, [%0] %3\n\t"
	"sta	%%g0, [%2] %4\n\t"
	"sta	%%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			    prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if(what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if(what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}
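/* Each such pgd-level PTE spans SRMMU_PGDIR_SIZE (16 MB); map_spbank() below
 * walks a memory bank in those 16 MB steps.
 */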

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}

static inline void map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for(i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_flush_tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = pmd_offset(pgd, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

static void srmmu_destroy_context(struct mm_struct *mm)
{

	if(mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;

	BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);


	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

static void __cpuinit poke_cypress(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long faddr, tagval;
	volatile unsigned long cypress_sucks;
	volatile unsigned long clear;

	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	if (!(mreg & CYPRESS_CENABLE)) {
		for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
			__asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
					     "sta %%g0, [%0] %2\n\t" : :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));
		}
	} else {
		for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
			__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
					     "=r" (tagval) :
					     "r" (faddr), "r" (0x40000),
					     "i" (ASI_M_DATAC_TAG));

			/* If modified and valid, kick it. */
			if((tagval & 0x60) == 0x60)
				cypress_sucks = *(unsigned long *)
						(0xf0020000 + faddr);
		}
	}

	/* And one more, for our good neighbor, Mr. Broken Cypress. */
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();

	mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
	srmmu_set_mmureg(mreg);
}

static void __init init_cypress_common(void)
{
	init_vac_layout();

	BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);


	BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);

	poke_srmmu = poke_cypress;
}

static void __init init_cypress_604(void)
{
	srmmu_name = "ROSS Cypress-604(UP)";
	srmmu_modtype = Cypress;
	init_cypress_common();
}

static void __init init_cypress_605(unsigned long mrev)
{
	srmmu_name = "ROSS Cypress-605(MP)";
	if(mrev == 0xe) {
		srmmu_modtype = Cypress_vE;
		hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
	} else {
		if(mrev == 0xd) {
			srmmu_modtype = Cypress_vD;
			hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
		} else {
			srmmu_modtype = Cypress;
		}
	}
	init_cypress_common();
}

static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
1456 mreg &= ~(SWIFT_BF);
1457 srmmu_set_mmureg(mreg);
1458}
1459
1460#define SWIFT_MASKID_ADDR 0x10003018
1461static void __init init_swift(void)
1462{
1463 unsigned long swift_rev;
1464
1465 __asm__ __volatile__("lda [%1] %2, %0\n\t"
1466 "srl %0, 0x18, %0\n\t" :
1467 "=r" (swift_rev) :
1468 "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
1469 srmmu_name = "Fujitsu Swift";
1470 switch(swift_rev) {
1471 case 0x11:
1472 case 0x20:
1473 case 0x23:
1474 case 0x30:
1475 srmmu_modtype = Swift_lots_o_bugs;
1476 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
1477 /*
1478 * Gee george, I wonder why Sun is so hush hush about
1479 * this hardware bug... really braindamage stuff going
1480 * on here. However I think we can find a way to avoid
1481 * all of the workaround overhead under Linux. Basically,
1482 * any page fault can cause kernel pages to become user
1483 * accessible (the mmu gets confused and clears some of
1484 * the ACC bits in kernel ptes). Aha, sounds pretty
1485 * horrible eh? But wait, after extensive testing it appears
1486 * that if you use pgd_t level large kernel pte's (like the
1487 * 4MB pages on the Pentium) the bug does not get tripped
1488 * at all. This avoids almost all of the major overhead.
1489 * Welcome to a world where your vendor tells you to,
1490 * "apply this kernel patch" instead of "sorry for the
1491 * broken hardware, send it back and we'll give you
1492 * properly functioning parts"
1493 */
1494 break;
1495 case 0x25:
1496 case 0x31:
1497 srmmu_modtype = Swift_bad_c;
1498 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
1499 /*
1500 * You see Sun allude to this hardware bug but never
1501 * admit things directly, they'll say things like,
1502 * "the Swift chip cache problems" or similar.
1503 */
1504 break;
1505 default:
1506 srmmu_modtype = Swift_ok;
1507 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00001508 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509
1510 BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
1511 BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
1512 BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
1513 BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);
1514
1515
1516 BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
1517 BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
1518 BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
1519 BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);
1520
1521 BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
1522 BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
1523 BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);
1524
1525 BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);
1526
1527 flush_page_for_dma_global = 0;
1528
1529 /*
1530 * Are you now convinced that the Swift is one of the
1531 * biggest VLSI abortions of all time? Bravo Fujitsu!
1532 * Fujitsu, the !#?!%$'d up processor people. I bet if
1533 * you examined the microcode of the Swift you'd find
1534 * XXX's all over the place.
1535 */
1536 poke_srmmu = poke_swift;
1537}
1538
1539static void turbosparc_flush_cache_all(void)
1540{
1541 flush_user_windows();
1542 turbosparc_idflash_clear();
1543}
1544
1545static void turbosparc_flush_cache_mm(struct mm_struct *mm)
1546{
1547 FLUSH_BEGIN(mm)
1548 flush_user_windows();
1549 turbosparc_idflash_clear();
1550 FLUSH_END
1551}
1552
1553static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1554{
1555 FLUSH_BEGIN(vma->vm_mm)
1556 flush_user_windows();
1557 turbosparc_idflash_clear();
1558 FLUSH_END
1559}
1560
1561static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1562{
1563 FLUSH_BEGIN(vma->vm_mm)
1564 flush_user_windows();
1565 if (vma->vm_flags & VM_EXEC)
1566 turbosparc_flush_icache();
1567 turbosparc_flush_dcache();
1568 FLUSH_END
1569}
1570
1571/* TurboSparc's D-cache can run in copy-back mode if we turn that mode on, but doing so does not work. */
1572static void turbosparc_flush_page_to_ram(unsigned long page)
1573{
1574#ifdef TURBOSPARC_WRITEBACK
1575 volatile unsigned long clear;
1576
1577 if (srmmu_hwprobe(page))
1578 turbosparc_flush_page_cache(page);
1579 clear = srmmu_get_fstatus();
1580#endif
1581}
1582
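/*
 * Nothing to do for signal trampolines here: poke_turbosparc() enables
 * I-cache snooping (TURBOSPARC_ICSNOOP), so stores to the trampoline
 * should already be visible to the instruction cache.
 */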
1583static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1584{
1585}
1586
1587static void turbosparc_flush_page_for_dma(unsigned long page)
1588{
1589 turbosparc_flush_dcache();
1590}
1591
1592static void turbosparc_flush_tlb_all(void)
1593{
1594 srmmu_flush_whole_tlb();
1595}
1596
1597static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
1598{
1599 FLUSH_BEGIN(mm)
1600 srmmu_flush_whole_tlb();
1601 FLUSH_END
1602}
1603
1604static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1605{
1606 FLUSH_BEGIN(vma->vm_mm)
1607 srmmu_flush_whole_tlb();
1608 FLUSH_END
1609}
1610
1611static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1612{
1613 FLUSH_BEGIN(vma->vm_mm)
1614 srmmu_flush_whole_tlb();
1615 FLUSH_END
1616}
1617
1618
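/*
 * Per-cpu TurboSparc setup: flush everything, temporarily disable the
 * I/D caches while the cache-control register is reprogrammed (DVMA
 * snooping, write policy, external cache enable), then re-enable the
 * caches with I-cache snooping turned on.
 */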
Al Viro409832f2008-11-22 17:33:54 +00001619static void __cpuinit poke_turbosparc(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001620{
1621 unsigned long mreg = srmmu_get_mmureg();
1622 unsigned long ccreg;
1623
1624 /* Clear any crap from the cache or else... */
1625 turbosparc_flush_cache_all();
1626 mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
1627 mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */
1628 srmmu_set_mmureg(mreg);
1629
1630 ccreg = turbosparc_get_ccreg();
1631
1632#ifdef TURBOSPARC_WRITEBACK
1633 ccreg |= (TURBOSPARC_SNENABLE); /* Do DVMA snooping in Dcache */
1634 ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
1635 /* Write-back D-cache, emulate VLSI
1636 * abortion number three, not number one */
1637#else
1638 /* For now let's play safe, optimize later */
1639 ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
1640 /* Do DVMA snooping in Dcache, Write-thru D-cache */
1641 ccreg &= ~(TURBOSPARC_uS2);
1642 /* Emulate VLSI abortion number three, not number one */
1643#endif
1644
1645 switch (ccreg & 7) {
1646 case 0: /* No SE cache */
1647 case 7: /* Test mode */
1648 break;
1649 default:
1650 ccreg |= (TURBOSPARC_SCENABLE);
1651 }
1652 turbosparc_set_ccreg (ccreg);
1653
1654 mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
1655 mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */
1656 srmmu_set_mmureg(mreg);
1657}
1658
1659static void __init init_turbosparc(void)
1660{
1661 srmmu_name = "Fujitsu TurboSparc";
1662 srmmu_modtype = TurboSparc;
1663
1664 BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
1665 BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
1666 BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
1667 BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);
1668
1669 BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
1670 BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
1671 BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
1672 BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);
1673
1674 BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);
1675
1676 BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
1677 BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);
1678
1679 poke_srmmu = poke_turbosparc;
1680}
1681
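/*
 * Per-cpu Tsunami setup: flush both caches, clear the TSUNAMI_ITD
 * control bit and turn the I/D caches on.
 */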
Al Viro409832f2008-11-22 17:33:54 +00001682static void __cpuinit poke_tsunami(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683{
1684 unsigned long mreg = srmmu_get_mmureg();
1685
1686 tsunami_flush_icache();
1687 tsunami_flush_dcache();
1688 mreg &= ~TSUNAMI_ITD;
1689 mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
1690 srmmu_set_mmureg(mreg);
1691}
1692
1693static void __init init_tsunami(void)
1694{
1695 /*
1696	 * Tsunami's pretty sane; Sun and TI actually got it
1697	 * somewhat right this time. Fujitsu should have
1698	 * taken some lessons from them.
1699 */
1700
1701 srmmu_name = "TI Tsunami";
1702 srmmu_modtype = Tsunami;
1703
1704 BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
1705 BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
1706 BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
1707 BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);
1708
1709
1710 BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
1711 BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
1712 BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
1713 BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);
1714
1715 BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
1716 BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
1717 BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);
1718
1719 poke_srmmu = poke_tsunami;
1720
1721 tsunami_setup_blockops();
1722}
1723
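/*
 * Per-cpu Viking setup.  With an MXCC we enable its external cache and
 * let the MMU cache page-table walks; without one, table walks stay
 * uncached and secondary cpus get mixed-command mode switched off.
 * In both cases the I/D caches and the store buffer are enabled.
 */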
Al Viro409832f2008-11-22 17:33:54 +00001724static void __cpuinit poke_viking(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725{
1726 unsigned long mreg = srmmu_get_mmureg();
1727 static int smp_catch;
1728
1729 if(viking_mxcc_present) {
1730 unsigned long mxcc_control = mxcc_get_creg();
1731
1732 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
1733 mxcc_control &= ~(MXCC_CTL_RRC);
1734 mxcc_set_creg(mxcc_control);
1735
1736 /*
1737 * We don't need memory parity checks.
1738 * XXX This is a mess, have to dig out later. ecd.
1739 viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
1740 */
1741
1742 /* We do cache ptables on MXCC. */
1743 mreg |= VIKING_TCENABLE;
1744 } else {
1745 unsigned long bpreg;
1746
1747 mreg &= ~(VIKING_TCENABLE);
1748 if(smp_catch++) {
1749			/* Must disable mixed-cmd mode here for the other CPUs. */
1750 bpreg = viking_get_bpreg();
1751 bpreg &= ~(VIKING_ACTION_MIX);
1752 viking_set_bpreg(bpreg);
1753
1754 /* Just in case PROM does something funny. */
1755 msi_set_sync();
1756 }
1757 }
1758
1759 mreg |= VIKING_SPENABLE;
1760 mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
1761 mreg |= VIKING_SBENABLE;
1762 mreg &= ~(VIKING_ACENABLE);
1763 srmmu_set_mmureg(mreg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764}
1765
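/*
 * Two flavours of Viking: the plain TI Viking, which needs every DMA
 * page flushed to dodge the "load from non-cacheable memory" bug, and
 * the Viking/MXCC, which has no such problem and can even cache its
 * page tables.
 */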
1766static void __init init_viking(void)
1767{
1768 unsigned long mreg = srmmu_get_mmureg();
1769
1770 /* Ahhh, the viking. SRMMU VLSI abortion number two... */
1771 if(mreg & VIKING_MMODE) {
1772 srmmu_name = "TI Viking";
1773 viking_mxcc_present = 0;
1774 msi_set_sync();
1775
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776 /*
1777		 * We need this to make sure the old Viking takes no hits
1778		 * on its cache from DMA snoops, to work around the
1779		 * "load from non-cacheable memory" interrupt bug.
1780		 * This is only necessary because of the new way in
1781		 * which we use the IOMMU.
1782 */
1783 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);
1784
1785 flush_page_for_dma_global = 0;
1786 } else {
1787 srmmu_name = "TI Viking/MXCC";
1788 viking_mxcc_present = 1;
1789
1790 srmmu_cache_pagetables = 1;
1791
1792 /* MXCC vikings lack the DMA snooping bug. */
1793 BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
1794 }
1795
1796 BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
1797 BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
1798 BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
1799 BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);
1800
1801#ifdef CONFIG_SMP
1802 if (sparc_cpu_model == sun4d) {
1803 BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
1804 BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
1805 BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
1806 BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
1807 } else
1808#endif
1809 {
1810 BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
1811 BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
1812 BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
1813 BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
1814 }
1815
1816 BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
1817 BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);
1818
1819 poke_srmmu = poke_viking;
1820}
1821
Konrad Eisele75d9e342009-08-17 00:13:33 +00001822#ifdef CONFIG_SPARC_LEON
1823
1824void __init poke_leonsparc(void)
1825{
1826}
1827
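/*
 * LEON keeps things simple: most of the per-mm, per-range and per-page
 * flush operations simply fall back to the whole-cache / whole-TLB
 * flush routines, and page tables are not cached.
 */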
1828void __init init_leon(void)
1829{
1830
Kristoffer Glemboc803ba92009-12-02 04:30:22 +00001831 srmmu_name = "LEON";
Konrad Eisele75d9e342009-08-17 00:13:33 +00001832
1833 BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
1834 BTFIXUPCALL_NORM);
1835 BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
1836 BTFIXUPCALL_NORM);
1837 BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
1838 BTFIXUPCALL_NORM);
1839 BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
1840 BTFIXUPCALL_NORM);
1841 BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
1842 BTFIXUPCALL_NORM);
1843
1844 BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
1845 BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
1846 BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
1847 BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);
1848
1849 BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
1850 BTFIXUPCALL_NOP);
1851 BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);
1852
1853 poke_srmmu = poke_leonsparc;
1854
1855 srmmu_cache_pagetables = 0;
1856
1857 leon_flush_during_switch = leon_flush_needed();
1858}
1859#endif
1860
Linus Torvalds1da177e2005-04-16 15:20:36 -07001861/* Probe for the srmmu chip version. */
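/*
 * The module type/revision come from the MMU control register and the
 * implementation/version fields from the PSR; LEON is special-cased
 * first because it is identified by sparc_cpu_model instead.
 */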
1862static void __init get_srmmu_type(void)
1863{
1864 unsigned long mreg, psr;
1865 unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1866
1867 srmmu_modtype = SRMMU_INVAL_MOD;
1868 hwbug_bitmask = 0;
1869
1870 mreg = srmmu_get_mmureg(); psr = get_psr();
1871 mod_typ = (mreg & 0xf0000000) >> 28;
1872 mod_rev = (mreg & 0x0f000000) >> 24;
1873 psr_typ = (psr >> 28) & 0xf;
1874 psr_vers = (psr >> 24) & 0xf;
1875
Konrad Eisele75d9e342009-08-17 00:13:33 +00001876 /* First, check for sparc-leon. */
1877 if (sparc_cpu_model == sparc_leon) {
Konrad Eisele75d9e342009-08-17 00:13:33 +00001878 init_leon();
1879 return;
1880 }
1881
1882 /* Second, check for HyperSparc or Cypress. */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001883 if(mod_typ == 1) {
1884 switch(mod_rev) {
1885 case 7:
1886 /* UP or MP Hypersparc */
1887 init_hypersparc();
1888 break;
1889 case 0:
1890 case 2:
1891 /* Uniprocessor Cypress */
1892 init_cypress_604();
1893 break;
1894 case 10:
1895 case 11:
1896 case 12:
1897 /* _REALLY OLD_ Cypress MP chips... */
1898 case 13:
1899 case 14:
1900 case 15:
1901 /* MP Cypress mmu/cache-controller */
1902 init_cypress_605(mod_rev);
1903 break;
1904 default:
1905 /* Some other Cypress revision, assume a 605. */
1906 init_cypress_605(mod_rev);
1907 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00001908 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909 return;
1910 }
1911
1912 /*
1913	 * Now Fujitsu TurboSparc. It may be running in Swift
1914	 * emulation mode, so we will check for that later...
1915 */
1916 if (psr_typ == 0 && psr_vers == 5) {
1917 init_turbosparc();
1918 return;
1919 }
1920
1921 /* Next check for Fujitsu Swift. */
1922 if(psr_typ == 0 && psr_vers == 4) {
Andres Salomon8d125562010-10-08 14:18:11 -07001923 phandle cpunode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001924 char node_str[128];
1925
1926		/* Check whether this is really a TurboSparc emulating a Swift... */
1927 cpunode = prom_getchild(prom_root_node);
1928 while((cpunode = prom_getsibling(cpunode)) != 0) {
1929 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
1930 if(!strcmp(node_str, "cpu")) {
1931 if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
1932 prom_getintdefault(cpunode, "psr-version", 1) == 5) {
1933 init_turbosparc();
1934 return;
1935 }
1936 break;
1937 }
1938 }
1939
1940 init_swift();
1941 return;
1942 }
1943
1944 /* Now the Viking family of srmmu. */
1945 if(psr_typ == 4 &&
1946 ((psr_vers == 0) ||
1947 ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1948 init_viking();
1949 return;
1950 }
1951
1952 /* Finally the Tsunami. */
1953 if(psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
1954 init_tsunami();
1955 return;
1956 }
1957
1958 /* Oh well */
1959 srmmu_is_bad();
1960}
1961
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
1963 tsetup_mmu_patchme, rtrap_mmu_patchme;
1964
1965extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
1966 tsetup_srmmu_stackchk, srmmu_rett_stackchk;
1967
Linus Torvalds1da177e2005-04-16 15:20:36 -07001968#ifdef CONFIG_SMP
1969/* Local cross-calls. */
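/*
 * smp_flush_page_for_dma() broadcasts the per-page DMA flush to the
 * other cpus via a cross-call and then performs it locally.
 */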
1970static void smp_flush_page_for_dma(unsigned long page)
1971{
1972 xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
1973 local_flush_page_for_dma(page);
1974}
1975
1976#endif
1977
Linus Torvalds1da177e2005-04-16 15:20:36 -07001978/* Load up routines and constants for sun4m and sun4d mmu */
Sam Ravnborga3c5c662012-05-12 20:35:52 +02001979void __init load_mmu(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001980{
1981 extern void ld_mmu_iommu(void);
1982 extern void ld_mmu_iounit(void);
1983 extern void ___xchg32_sun4md(void);
1984
Linus Torvalds1da177e2005-04-16 15:20:36 -07001985 /* Functions */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001986#ifndef CONFIG_SMP
1987 BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
1988#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
Linus Torvalds1da177e2005-04-16 15:20:36 -07001990 BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
1991 BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);
1992
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993 get_srmmu_type();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001994
1995#ifdef CONFIG_SMP
1996 /* El switcheroo... */
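	/*
	 * Save the uniprocessor implementations chosen above as the
	 * local_* variants, then point the generic entry points at the
	 * smp_* cross-call wrappers.
	 */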
1997
1998 BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
1999 BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
2000 BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
2001 BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
2002 BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
2003 BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
2004 BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
2005 BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
2006 BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
2007 BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
2008 BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);
2009
2010 BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
2011 BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
2012 BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
2013 BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
Konrad Eisele84017072009-08-31 22:08:13 +00002014 if (sparc_cpu_model != sun4d &&
2015 sparc_cpu_model != sparc_leon) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016 BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
2017 BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
2018 BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
2019 BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
2020 }
2021 BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
2022 BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
2023 BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);
David S. Miller64273d02008-11-26 01:00:58 -08002024
2025 if (poke_srmmu == poke_viking) {
2026 /* Avoid unnecessary cross calls. */
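		/*
		 * Only the cache-related operations are switched back to
		 * their local variants here; the TLB flushes keep the
		 * cross-call versions set above.
		 */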
2027 BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
2028 BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
2029 BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
2030 BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
2031 BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
2032 BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
2033 BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
2034 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07002035#endif
2036
2037 if (sparc_cpu_model == sun4d)
2038 ld_mmu_iounit();
2039 else
2040 ld_mmu_iommu();
2041#ifdef CONFIG_SMP
2042 if (sparc_cpu_model == sun4d)
2043 sun4d_init_smp();
Konrad Eisele84017072009-08-31 22:08:13 +00002044 else if (sparc_cpu_model == sparc_leon)
2045 leon_init_smp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002046 else
2047 sun4m_init_smp();
2048#endif
Sam Ravnborga3c5c662012-05-12 20:35:52 +02002049 btfixup();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050}