/*
 * srmmu.c: SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/bitext.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>
#include <asm/leon.h>

#include <asm/btfixup.h>

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

#ifdef CONFIG_SMP
#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif

BTFIXUPDEF_CALL(void, flush_page_for_dma, unsigned long)
#define flush_page_for_dma(page) BTFIXUP_CALL(flush_page_for_dma)(page)

int flush_page_for_dma_global = 1;

#ifdef CONFIG_SMP
BTFIXUPDEF_CALL(void, local_flush_page_for_dma, unsigned long)
#define local_flush_page_for_dma(page) BTFIXUP_CALL(local_flush_page_for_dma)(page)
#endif

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static inline unsigned long srmmu_pgd_page(pgd_t pgd)
{ return srmmu_device_memory(pgd_val(pgd))?~0:(unsigned long)__nocache_va((pgd_val(pgd) & SRMMU_PTD_PMASK) << 4); }


static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

static inline pte_t srmmu_pte_wrprotect(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkclean(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkold(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_REF);}

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

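/* Point every hardware PTD in this software pmd at the given (nocache)
 * pte table; one PTD per SRMMU_REAL_PTRS_PER_PTE worth of ptes. */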
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

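/* Same as pmd_set(), but the pte table is given as a struct page. */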
void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }

/* to find an entry in a top-level page table... */
static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }

/* Find an entry in the second-level page table.. */
static inline pmd_t *srmmu_pmd_offset(pgd_t * dir, unsigned long address)
{
	return (pmd_t *) srmmu_pgd_page(*dir) +
	    ((address >> PMD_SHIFT) & (PTRS_PER_PMD - 1));
}

/* Find an entry in the third-level page table.. */
static inline pte_t *srmmu_pte_offset(pmd_t * dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

static unsigned long srmmu_swp_type(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_TYPE_SHIFT) & SRMMU_SWP_TYPE_MASK;
}

static unsigned long srmmu_swp_offset(swp_entry_t entry)
{
	return (entry.val >> SRMMU_SWP_OFF_SHIFT) & SRMMU_SWP_OFF_MASK;
}

static swp_entry_t srmmu_swp_entry(unsigned long type, unsigned long offset)
{
	return (swp_entry_t) {
		(type & SRMMU_SWP_TYPE_MASK) << SRMMU_SWP_TYPE_SHIFT
		| (offset & SRMMU_SWP_OFF_MASK) << SRMMU_SWP_OFF_SHIFT };
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
		printk("Size 0x%x unaligned int nocache request\n", size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
					size >> SRMMU_NOCACHE_BITMAP_SHIFT,
					align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
		    size, (int) srmmu_nocache_size,
		    srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}

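/* Allocate from the nocache pool and zero the returned area. */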
unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}

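/* Release a nocache area; vaddr and size must describe a previous
 * allocation (size a power of two, vaddr aligned to size). */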
void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr+size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		    vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size-1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}

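/* Allocate the nocache pool from bootmem, set up its allocation bitmap and
 * map the pool into the kernel page tables (uncached unless page tables
 * are allowed to be cached). */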
static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = srmmu_pmd_offset(__nocache_fix(pgd), vaddr);
		pte = srmmu_pte_offset(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

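/* Allocate a page directory from the nocache pool and copy the kernel
 * mappings into it. */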
pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
					(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
	pgtable_page_ctor(page);
	return page;
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
	srmmu_free_nocache(p, PTE_SIZE);
}

/*
 * Allocate a hardware MMU context for mm: take one from the free list if
 * available, otherwise steal the oldest used context (skipping old_mm) and
 * flush its previous owner.
 */
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if(ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if(ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if(ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}


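/* Activate mm's MMU context on this CPU, allocating one first if needed. */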
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if(mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/*
	 * I need to test whether this is consistent over all
	 * sun4m's. The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}

static void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
			     unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = srmmu_pmd_offset(pgdp, virt_addr);
	ptep = srmmu_pte_offset(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

static void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/*
 * On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool. As a side effect we are putting a little too much pressure
 * on the gfp() subsystem. This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code.
 */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ret;

	ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
						     THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
	if (ret)
		memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */

	return ret;
}

void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/*
 * Workaround, until we find what's going on with Swift. When low on memory,
 * it sometimes loops in fault/handle_mm_fault incl. flush_tlb_page to find
 * out it is already in the page tables / faults again on the same
 * instruction. I really don't understand it, have checked it and contexts
 * are right, flush_tlb_all is done as well, and it faults again... Strange. -jj
 *
 * The following code is deadwood that may be necessary when
 * we start to make precise page flushes again. --zaitcev
 */
static void swift_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t *ptep)
{
#if 0
	static unsigned long last;
	unsigned int val;
	/* unsigned int n; */

	if (address == last) {
		val = srmmu_hwprobe(address);
		if (val != 0 && pte_val(*ptep) != val) {
			printk("swift_update_mmu_cache: "
			    "addr %lx put %08x probed %08x from %pf\n",
			    address, pte_val(*ptep), val,
			    __builtin_return_address(0));
			srmmu_flush_whole_tlb();
		}
	}
	last = address;
#endif
}

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0  /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration. On the whole, these
 * chips seems to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* Cypress flushes. */
static void cypress_flush_cache_all(void)
{
	volatile unsigned long cypress_sucks;
	unsigned long faddr, tagval;

	flush_user_windows();
	for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
		__asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
				     "=r" (tagval) :
				     "r" (faddr), "r" (0x40000),
				     "i" (ASI_M_DATAC_TAG));

		/* If modified and valid, kick it. */
		if((tagval & 0x60) == 0x60)
			cypress_sucks = *(unsigned long *)(0xf0020000 + faddr);
	}
}

static void cypress_flush_cache_mm(struct mm_struct *mm)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	faddr = (0x10000 - 0x100);
	goto inside;
	do {
		faddr -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (faddr), "i" (ASI_M_FLUSH_CTX),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(faddr);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	register unsigned long a, b, c, d, e, f, g;
	unsigned long flags, faddr;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	start &= SRMMU_REAL_PMD_MASK;
	while(start < end) {
		faddr = (start + (0x10000 - 0x100));
		goto inside;
		do {
			faddr -= 0x100;
		inside:
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
					     "sta %%g0, [%0 + %2] %1\n\t"
					     "sta %%g0, [%0 + %3] %1\n\t"
					     "sta %%g0, [%0 + %4] %1\n\t"
					     "sta %%g0, [%0 + %5] %1\n\t"
					     "sta %%g0, [%0 + %6] %1\n\t"
					     "sta %%g0, [%0 + %7] %1\n\t"
					     "sta %%g0, [%0 + %8] %1\n\t" : :
					     "r" (faddr),
					     "i" (ASI_M_FLUSH_SEG),
					     "r" (a), "r" (b), "r" (c), "r" (d),
					     "r" (e), "r" (f), "r" (g));
		} while (faddr != start);
		start += SRMMU_REAL_PMD_SIZE;
	}
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

static void cypress_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	struct mm_struct *mm = vma->vm_mm;
	unsigned long flags, line;
	int octx;

	FLUSH_BEGIN(mm)
	flush_user_windows();
	local_irq_save(flags);
	octx = srmmu_get_context();
	srmmu_set_context(mm->context);
	a = 0x20; b = 0x40; c = 0x60;
	d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;

	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
	srmmu_set_context(octx);
	local_irq_restore(flags);
	FLUSH_END
}

/* Cypress is copy-back, at least that is how we configure it. */
static void cypress_flush_page_to_ram(unsigned long page)
{
	register unsigned long a, b, c, d, e, f, g;
	unsigned long line;

	a = 0x20; b = 0x40; c = 0x60; d = 0x80; e = 0xa0; f = 0xc0; g = 0xe0;
	page &= PAGE_MASK;
	line = (page + PAGE_SIZE) - 0x100;
	goto inside;
	do {
		line -= 0x100;
	inside:
		__asm__ __volatile__("sta %%g0, [%0] %1\n\t"
				     "sta %%g0, [%0 + %2] %1\n\t"
				     "sta %%g0, [%0 + %3] %1\n\t"
				     "sta %%g0, [%0 + %4] %1\n\t"
				     "sta %%g0, [%0 + %5] %1\n\t"
				     "sta %%g0, [%0 + %6] %1\n\t"
				     "sta %%g0, [%0 + %7] %1\n\t"
				     "sta %%g0, [%0 + %8] %1\n\t" : :
				     "r" (line),
				     "i" (ASI_M_FLUSH_PAGE),
				     "r" (a), "r" (b), "r" (c), "r" (d),
				     "r" (e), "r" (f), "r" (g));
	} while(line != page);
}

/* Cypress is also IO cache coherent. */
static void cypress_flush_page_for_dma(unsigned long page)
{
}

/* Cypress has a unified L2 VIPT cache, in which both instructions and data
 * are stored. It does not have an onboard icache of any sort, therefore
 * no flush is necessary.
 */
static void cypress_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void cypress_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void cypress_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda [%0] %3, %%g5\n\t"
	"sta %2, [%0] %3\n\t"
	"sta %%g0, [%1] %4\n\t"
	"sta %%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (0x300), "r" (mm->context),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

static void cypress_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	unsigned long size;

	FLUSH_BEGIN(mm)
	start &= SRMMU_PGDIR_MASK;
	size = SRMMU_PGDIR_ALIGN(end) - start;
	__asm__ __volatile__(
		"lda [%0] %5, %%g5\n\t"
		"sta %1, [%0] %5\n"
		"1:\n\t"
		"subcc %3, %4, %3\n\t"
		"bne 1b\n\t"
		" sta %%g0, [%2 + %3] %6\n\t"
		"sta %%g5, [%0] %5\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (start | 0x200),
	  "r" (size), "r" (SRMMU_PGDIR_SIZE), "i" (ASI_M_MMUREGS),
	  "i" (ASI_M_FLUSH_PROBE)
	: "g5", "cc");
	FLUSH_END
}

static void cypress_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	FLUSH_BEGIN(mm)
	__asm__ __volatile__(
	"lda [%0] %3, %%g5\n\t"
	"sta %1, [%0] %3\n\t"
	"sta %%g0, [%2] %4\n\t"
	"sta %%g5, [%0] %3\n"
	: /* no outputs */
	: "r" (SRMMU_CTX_REG), "r" (mm->context), "r" (page & PAGE_MASK),
	  "i" (ASI_M_MMUREGS), "i" (ASI_M_FLUSH_PROBE)
	: "g5");
	FLUSH_END
}

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram. On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

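/* Build pgd/pmd/pte skeleton tables for [start, end) out of the nocache
 * pool. Runs before the nocache area itself is mapped, so every table is
 * touched through __nocache_fix(). */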
static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

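/* Same as above, but for use once the nocache area is already mapped, so
 * the tables can be accessed through their normal virtual addresses. */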
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = srmmu_pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do. Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			    prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if(what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = srmmu_pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if(what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = srmmu_pte_offset(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}

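/* Map every sp_banks[] physical memory bank into the kernel linear
 * mapping, using 16MB pgd-level mappings via do_large_mapping(). */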
static inline void map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for(i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_flush_tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = srmmu_pmd_offset(pgd, PKMAP_BASE);
	pte = srmmu_pte_offset(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

static void srmmu_mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

static void srmmu_update_mmu_cache(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
}

static void srmmu_destroy_context(struct mm_struct *mm)
{

	if(mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

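/* Read the virtually-addressed cache geometry from the PROM cpu nodes.
 * On SMP, keep the largest cache size and the smallest line size found. */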
static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

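/* Enable the HyperSparc cache, write buffer and instruction cache after
 * flushing any stale state. */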
Al Viro409832f2008-11-22 17:33:54 +00001328static void __cpuinit poke_hypersparc(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001329{
1330 volatile unsigned long clear;
1331 unsigned long mreg = srmmu_get_mmureg();
1332
1333 hyper_flush_unconditional_combined();
1334
1335 mreg &= ~(HYPERSPARC_CWENABLE);
1336 mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
1337 mreg |= (HYPERSPARC_CMODE);
1338
1339 srmmu_set_mmureg(mreg);
1340
1341#if 0 /* XXX I think this is bad news... -DaveM */
1342 hyper_clear_all_tags();
1343#endif
1344
1345 put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
1346 hyper_flush_whole_icache();
1347 clear = srmmu_get_faddr();
1348 clear = srmmu_get_fstatus();
1349}
1350
1351static void __init init_hypersparc(void)
1352{
1353 srmmu_name = "ROSS HyperSparc";
1354 srmmu_modtype = HyperSparc;
1355
1356 init_vac_layout();
1357
1358 is_hypersparc = 1;
1359
Linus Torvalds1da177e2005-04-16 15:20:36 -07001360 BTFIXUPSET_CALL(flush_cache_all, hypersparc_flush_cache_all, BTFIXUPCALL_NORM);
1361 BTFIXUPSET_CALL(flush_cache_mm, hypersparc_flush_cache_mm, BTFIXUPCALL_NORM);
1362 BTFIXUPSET_CALL(flush_cache_range, hypersparc_flush_cache_range, BTFIXUPCALL_NORM);
1363 BTFIXUPSET_CALL(flush_cache_page, hypersparc_flush_cache_page, BTFIXUPCALL_NORM);
1364
1365 BTFIXUPSET_CALL(flush_tlb_all, hypersparc_flush_tlb_all, BTFIXUPCALL_NORM);
1366 BTFIXUPSET_CALL(flush_tlb_mm, hypersparc_flush_tlb_mm, BTFIXUPCALL_NORM);
1367 BTFIXUPSET_CALL(flush_tlb_range, hypersparc_flush_tlb_range, BTFIXUPCALL_NORM);
1368 BTFIXUPSET_CALL(flush_tlb_page, hypersparc_flush_tlb_page, BTFIXUPCALL_NORM);
1369
1370 BTFIXUPSET_CALL(__flush_page_to_ram, hypersparc_flush_page_to_ram, BTFIXUPCALL_NORM);
1371 BTFIXUPSET_CALL(flush_sig_insns, hypersparc_flush_sig_insns, BTFIXUPCALL_NORM);
1372 BTFIXUPSET_CALL(flush_page_for_dma, hypersparc_flush_page_for_dma, BTFIXUPCALL_NOP);
1373
1374
1375 poke_srmmu = poke_hypersparc;
1376
1377 hypersparc_setup_blockops();
1378}
1379
Al Viro409832f2008-11-22 17:33:54 +00001380static void __cpuinit poke_cypress(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001381{
1382 unsigned long mreg = srmmu_get_mmureg();
1383 unsigned long faddr, tagval;
1384 volatile unsigned long cypress_sucks;
1385 volatile unsigned long clear;
1386
1387 clear = srmmu_get_faddr();
1388 clear = srmmu_get_fstatus();
1389
1390 if (!(mreg & CYPRESS_CENABLE)) {
1391 for(faddr = 0x0; faddr < 0x10000; faddr += 20) {
1392 __asm__ __volatile__("sta %%g0, [%0 + %1] %2\n\t"
1393 "sta %%g0, [%0] %2\n\t" : :
1394 "r" (faddr), "r" (0x40000),
1395 "i" (ASI_M_DATAC_TAG));
1396 }
1397 } else {
1398 for(faddr = 0; faddr < 0x10000; faddr += 0x20) {
1399 __asm__ __volatile__("lda [%1 + %2] %3, %0\n\t" :
1400 "=r" (tagval) :
1401 "r" (faddr), "r" (0x40000),
1402 "i" (ASI_M_DATAC_TAG));
1403
1404 /* If modified and valid, kick it. */
1405 if((tagval & 0x60) == 0x60)
1406 cypress_sucks = *(unsigned long *)
1407 (0xf0020000 + faddr);
1408 }
1409 }
1410
1411 /* And one more, for our good neighbor, Mr. Broken Cypress. */
1412 clear = srmmu_get_faddr();
1413 clear = srmmu_get_fstatus();
1414
1415 mreg |= (CYPRESS_CENABLE | CYPRESS_CMODE);
1416 srmmu_set_mmureg(mreg);
1417}
1418
1419static void __init init_cypress_common(void)
1420{
1421 init_vac_layout();
1422
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 BTFIXUPSET_CALL(flush_cache_all, cypress_flush_cache_all, BTFIXUPCALL_NORM);
1424 BTFIXUPSET_CALL(flush_cache_mm, cypress_flush_cache_mm, BTFIXUPCALL_NORM);
1425 BTFIXUPSET_CALL(flush_cache_range, cypress_flush_cache_range, BTFIXUPCALL_NORM);
1426 BTFIXUPSET_CALL(flush_cache_page, cypress_flush_cache_page, BTFIXUPCALL_NORM);
1427
1428 BTFIXUPSET_CALL(flush_tlb_all, cypress_flush_tlb_all, BTFIXUPCALL_NORM);
1429 BTFIXUPSET_CALL(flush_tlb_mm, cypress_flush_tlb_mm, BTFIXUPCALL_NORM);
1430 BTFIXUPSET_CALL(flush_tlb_page, cypress_flush_tlb_page, BTFIXUPCALL_NORM);
1431 BTFIXUPSET_CALL(flush_tlb_range, cypress_flush_tlb_range, BTFIXUPCALL_NORM);
1432
1433
1434 BTFIXUPSET_CALL(__flush_page_to_ram, cypress_flush_page_to_ram, BTFIXUPCALL_NORM);
1435 BTFIXUPSET_CALL(flush_sig_insns, cypress_flush_sig_insns, BTFIXUPCALL_NOP);
1436 BTFIXUPSET_CALL(flush_page_for_dma, cypress_flush_page_for_dma, BTFIXUPCALL_NOP);
1437
1438 poke_srmmu = poke_cypress;
1439}
1440
1441static void __init init_cypress_604(void)
1442{
1443 srmmu_name = "ROSS Cypress-604(UP)";
1444 srmmu_modtype = Cypress;
1445 init_cypress_common();
1446}
1447
1448static void __init init_cypress_605(unsigned long mrev)
1449{
1450 srmmu_name = "ROSS Cypress-605(MP)";
1451 if(mrev == 0xe) {
1452 srmmu_modtype = Cypress_vE;
1453 hwbug_bitmask |= HWBUG_COPYBACK_BROKEN;
1454 } else {
1455 if(mrev == 0xd) {
1456 srmmu_modtype = Cypress_vD;
1457 hwbug_bitmask |= HWBUG_ASIFLUSH_BROKEN;
1458 } else {
1459 srmmu_modtype = Cypress;
1460 }
1461 }
1462 init_cypress_common();
1463}
1464
Al Viro409832f2008-11-22 17:33:54 +00001465static void __cpuinit poke_swift(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001466{
1467 unsigned long mreg;
1468
1469 /* Clear any crap from the cache or else... */
1470 swift_flush_cache_all();
1471
1472 /* Enable I & D caches */
1473 mreg = srmmu_get_mmureg();
1474 mreg |= (SWIFT_IE | SWIFT_DE);
1475 /*
1476 * The Swift branch folding logic is completely broken. At
1477 * trap time, if things are just right, if can mistakenly
1478 * think that a trap is coming from kernel mode when in fact
1479 * it is coming from user mode (it mis-executes the branch in
1480 * the trap code). So you see things like crashme completely
1481 * hosing your machine which is completely unacceptable. Turn
1482 * this shit off... nice job Fujitsu.
1483 */
1484 mreg &= ~(SWIFT_BF);
1485 srmmu_set_mmureg(mreg);
1486}
1487
1488#define SWIFT_MASKID_ADDR 0x10003018
1489static void __init init_swift(void)
1490{
1491 unsigned long swift_rev;
1492
1493 __asm__ __volatile__("lda [%1] %2, %0\n\t"
1494 "srl %0, 0x18, %0\n\t" :
1495 "=r" (swift_rev) :
1496 "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamaged stuff going
		 * on here. However, I think we can find a way to avoid
		 * all of the workaround overhead under Linux. Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes). Aha, sounds pretty
		 * horrible eh? But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel ptes (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all. This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts".
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly; they'll say things like
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	BTFIXUPSET_CALL(flush_cache_all, swift_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, swift_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, swift_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, swift_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, swift_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, swift_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, swift_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, swift_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, swift_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, swift_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, swift_flush_page_for_dma, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, swift_update_mmu_cache, BTFIXUPCALL_NORM);

	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time? Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people. I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
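	/* Only executable mappings need the I-cache flushed; the
	 * D-cache is flushed unconditionally below.
	 */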
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/*
 * The TurboSparc D-cache can run in copy-back mode, but enabling
 * that mode does not work, so unless TURBOSPARC_WRITEBACK is
 * defined this flush is a no-op.
 */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
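	/* The probe above may leave the MMU fault status register
	 * set; reading it here presumably clears that state (the
	 * value read is discarded).
	 */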
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
	/* Write-back D-cache, emulate VLSI
	 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
	/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
	/* Emulate VLSI abortion number three, not number one */
#endif

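	/* Enable the external (SE) cache unless the low three bits of
	 * ccreg say there is none (0) or the chip is in test mode (7).
	 */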
	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;

	BTFIXUPSET_CALL(flush_cache_all, turbosparc_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, turbosparc_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, turbosparc_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, turbosparc_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, turbosparc_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, turbosparc_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, turbosparc_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, turbosparc_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, turbosparc_flush_page_to_ram, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_sig_insns, turbosparc_flush_sig_insns, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_page_for_dma, turbosparc_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_turbosparc;
}

static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time. Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;

	BTFIXUPSET_CALL(flush_cache_all, tsunami_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, tsunami_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, tsunami_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, tsunami_flush_cache_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, tsunami_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, tsunami_flush_tlb_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, tsunami_flush_tlb_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, tsunami_flush_tlb_range, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, tsunami_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, tsunami_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, tsunami_flush_page_for_dma, BTFIXUPCALL_NORM);

	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpus. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

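	/* Enable cache snooping (SPENABLE), the I/D caches, and the
	 * store buffer (SBENABLE); keep alternate-cacheability
	 * (ACENABLE) off.
	 */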
	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking. SRMMU VLSI abortion number two... */
	if (mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();
		/*
		 * We need this to make sure the old viking takes no hits
		 * on its cache for dma snoops, to work around the
		 * "load from non-cacheable memory" interrupt bug.
		 * This is only necessary because of the new way in
		 * which we use the IOMMU.
		 */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page, BTFIXUPCALL_NORM);

		flush_page_for_dma_global = 0;
	} else {
		srmmu_name = "TI Viking/MXCC";
		viking_mxcc_present = 1;

		srmmu_cache_pagetables = 1;

		/* MXCC vikings lack the DMA snooping bug. */
		BTFIXUPSET_CALL(flush_page_for_dma, viking_flush_page_for_dma, BTFIXUPCALL_NOP);
	}

	BTFIXUPSET_CALL(flush_cache_all, viking_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, viking_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, viking_flush_cache_page, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, viking_flush_cache_range, BTFIXUPCALL_NORM);

#ifdef CONFIG_SMP
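	/* sun4d boxes run Vikings too, but they bring their own SMP
	 * TLB flush cross-call implementations.
	 */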
	if (sparc_cpu_model == sun4d) {
		BTFIXUPSET_CALL(flush_tlb_all, sun4dsmp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, sun4dsmp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, sun4dsmp_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, sun4dsmp_flush_tlb_range, BTFIXUPCALL_NORM);
	} else
#endif
	{
		BTFIXUPSET_CALL(flush_tlb_all, viking_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, viking_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, viking_flush_tlb_page, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, viking_flush_tlb_range, BTFIXUPCALL_NORM);
	}

	BTFIXUPSET_CALL(__flush_page_to_ram, viking_flush_page_to_ram, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, viking_flush_sig_insns, BTFIXUPCALL_NOP);

	poke_srmmu = poke_viking;
}

#ifdef CONFIG_SPARC_LEON

void __init poke_leonsparc(void)
{
}

void __init init_leon(void)
{
	srmmu_name = "LEON";

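	/* LEON funnels nearly all of the fine-grained flush operations
	 * into whole-cache or whole-TLB flushes.
	 */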
	BTFIXUPSET_CALL(flush_cache_all, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, leon_flush_pcache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, leon_flush_cache_all,
			BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, leon_flush_dcache_all,
			BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(flush_tlb_all, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_mm, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_page, leon_flush_tlb_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_tlb_range, leon_flush_tlb_all, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__flush_page_to_ram, leon_flush_cache_all,
			BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(flush_sig_insns, leon_flush_cache_all, BTFIXUPCALL_NOP);

	poke_srmmu = poke_leonsparc;

	srmmu_cache_pagetables = 0;

	leon_flush_during_switch = leon_flush_needed();
}
#endif

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

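	/* The impl/vers fields of the MMU control register identify
	 * the MMU module, while those of the PSR identify the
	 * integer unit.
	 */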
	mreg = srmmu_get_mmureg();
	psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
			/* Uniprocessor Cypress */
			init_cypress_604();
			break;
		case 10:
		case 11:
		case 12:
			/* _REALLY OLD_ Cypress MP chips... */
		case 13:
		case 14:
		case 15:
			/* MP Cypress mmu/cache-controller */
			init_cypress_605(mod_rev);
			break;
		default:
			/* Some other Cypress revision, assume a 605. */
			init_cypress_605(mod_rev);
			break;
		}
		return;
	}

	/*
	 * Now check for Fujitsu TurboSparc. It may be running in
	 * Swift emulation mode, so that case is checked for below
	 * as well.
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Check whether this is really a TurboSparc emulating a Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	    ((psr_vers == 0) ||
	     ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

extern unsigned long spwin_mmu_patchme, fwin_mmu_patchme,
	tsetup_mmu_patchme, rtrap_mmu_patchme;

extern unsigned long spwin_srmmu_stackchk, srmmu_fwin_stackchk,
	tsetup_srmmu_stackchk, srmmu_rett_stackchk;

#ifdef CONFIG_SMP
/* Local cross-calls. */
static void smp_flush_page_for_dma(unsigned long page)
{
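	/* Run the flush on all the other cpus via a cross-call, then
	 * locally on this one.
	 */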
	xc1((smpfunc_t) BTFIXUP_CALL(local_flush_page_for_dma), page);
	local_flush_page_for_dma(page);
}

#endif

/* Load up routines and constants for sun4m and sun4d mmu */
void __init load_mmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);
	extern void ___xchg32_sun4md(void);

	/* Functions */
#ifndef CONFIG_SMP
	BTFIXUPSET_CALL(___xchg32, ___xchg32_sun4md, BTFIXUPCALL_SWAPG1G2);
#endif

	BTFIXUPSET_CALL(pgd_page_vaddr, srmmu_pgd_page, BTFIXUPCALL_NORM);

	BTFIXUPSET_INT(pte_modify_mask, SRMMU_CHG_MASK);
	BTFIXUPSET_CALL(pmd_offset, srmmu_pmd_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(pte_offset_kernel, srmmu_pte_offset, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(update_mmu_cache, srmmu_update_mmu_cache, BTFIXUPCALL_NOP);
	BTFIXUPSET_CALL(destroy_context, srmmu_destroy_context, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(sparc_mapiorange, srmmu_mapiorange, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(sparc_unmapiorange, srmmu_unmapiorange, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(__swp_type, srmmu_swp_type, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_offset, srmmu_swp_offset, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(__swp_entry, srmmu_swp_entry, BTFIXUPCALL_NORM);

	BTFIXUPSET_CALL(mmu_info, srmmu_mmu_info, BTFIXUPCALL_NORM);

	get_srmmu_type();

#ifdef CONFIG_SMP
	/*
	 * El switcheroo: save the uniprocessor flush routines as the
	 * local_ variants, then repoint the global entries at the SMP
	 * cross-call versions.
	 */

	BTFIXUPCOPY_CALL(local_flush_cache_all, flush_cache_all);
	BTFIXUPCOPY_CALL(local_flush_cache_mm, flush_cache_mm);
	BTFIXUPCOPY_CALL(local_flush_cache_range, flush_cache_range);
	BTFIXUPCOPY_CALL(local_flush_cache_page, flush_cache_page);
	BTFIXUPCOPY_CALL(local_flush_tlb_all, flush_tlb_all);
	BTFIXUPCOPY_CALL(local_flush_tlb_mm, flush_tlb_mm);
	BTFIXUPCOPY_CALL(local_flush_tlb_range, flush_tlb_range);
	BTFIXUPCOPY_CALL(local_flush_tlb_page, flush_tlb_page);
	BTFIXUPCOPY_CALL(local_flush_page_to_ram, __flush_page_to_ram);
	BTFIXUPCOPY_CALL(local_flush_sig_insns, flush_sig_insns);
	BTFIXUPCOPY_CALL(local_flush_page_for_dma, flush_page_for_dma);

	BTFIXUPSET_CALL(flush_cache_all, smp_flush_cache_all, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_mm, smp_flush_cache_mm, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_range, smp_flush_cache_range, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_cache_page, smp_flush_cache_page, BTFIXUPCALL_NORM);
	if (sparc_cpu_model != sun4d &&
	    sparc_cpu_model != sparc_leon) {
		BTFIXUPSET_CALL(flush_tlb_all, smp_flush_tlb_all, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_mm, smp_flush_tlb_mm, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_range, smp_flush_tlb_range, BTFIXUPCALL_NORM);
		BTFIXUPSET_CALL(flush_tlb_page, smp_flush_tlb_page, BTFIXUPCALL_NORM);
	}
	BTFIXUPSET_CALL(__flush_page_to_ram, smp_flush_page_to_ram, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_sig_insns, smp_flush_sig_insns, BTFIXUPCALL_NORM);
	BTFIXUPSET_CALL(flush_page_for_dma, smp_flush_page_for_dma, BTFIXUPCALL_NORM);

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		BTFIXUPCOPY_CALL(flush_cache_all, local_flush_cache_all);
		BTFIXUPCOPY_CALL(flush_cache_mm, local_flush_cache_mm);
		BTFIXUPCOPY_CALL(flush_cache_range, local_flush_cache_range);
		BTFIXUPCOPY_CALL(flush_cache_page, local_flush_cache_page);
		BTFIXUPCOPY_CALL(__flush_page_to_ram, local_flush_page_to_ram);
		BTFIXUPCOPY_CALL(flush_sig_insns, local_flush_sig_insns);
		BTFIXUPCOPY_CALL(flush_page_for_dma, local_flush_page_for_dma);
	}
#endif

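	/* sun4d machines use IO-units for DVMA; everything else goes
	 * through the IOMMU.
	 */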
	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
	btfixup();
}