/*
 * srmmu.c: SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/kernel.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/pagemap.h>
#include <linux/init.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/fs.h>
#include <linux/seq_file.h>
#include <linux/kdebug.h>
#include <linux/log2.h>
#include <linux/gfp.h>

#include <asm/bitext.h>
#include <asm/page.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/io.h>
#include <asm/vaddrs.h>
#include <asm/traps.h>
#include <asm/smp.h>
#include <asm/mbus.h>
#include <asm/cache.h>
#include <asm/oplib.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/mmu_context.h>
#include <asm/io-unit.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>

/* Now the cpu specific definitions. */
#include <asm/viking.h>
#include <asm/mxcc.h>
#include <asm/ross.h>
#include <asm/tsunami.h>
#include <asm/swift.h>
#include <asm/turbosparc.h>
#include <asm/leon.h>

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;

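/*
 * FLUSH_BEGIN/FLUSH_END bracket the per-mm flush routines below: on UP
 * kernels they skip the flush when the mm has no hardware context yet,
 * while on SMP the check is compiled out and the flush always runs.
 */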
#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif

int flush_page_for_dma_global = 1;

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
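/*
 * Worked out (assuming the usual sparc32 PAGE_SHIFT of 12): the shift is
 * 12 - 4 = 8, so each bitmap bit covers 1 << 8 = 256 bytes, i.e. room for
 * 256 / 4 = 64 four-byte PTEs, as the comment above says.
 */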

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static inline int srmmu_pte_none(pte_t pte)
{ return !(pte_val(pte) & 0xFFFFFFF); }

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

static inline pte_t srmmu_pte_wrprotect(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_WRITE);}

static inline pte_t srmmu_pte_mkclean(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_DIRTY);}

static inline pte_t srmmu_pte_mkold(pte_t pte)
{ return __pte(pte_val(pte) & ~SRMMU_REF);}

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

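/*
 * A software pmd_t bundles PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE hardware
 * page-table pointers; pmd_set() aims each of them at consecutive
 * SRMMU_REAL_PTRS_PER_PTE-entry chunks of the same pte page.
 */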
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

static inline pte_t srmmu_pte_modify(pte_t pte, pgprot_t newprot)
{ return __pte((pte_val(pte) & SRMMU_CHG_MASK) | pgprot_val(newprot)); }

/* to find an entry in a top-level page table... */
static inline pgd_t *srmmu_pgd_offset(struct mm_struct * mm, unsigned long address)
{ return mm->pgd + (address >> SRMMU_PGDIR_SHIFT); }

/* Find an entry in the third-level page table. */
pte_t *pte_offset_kernel(pmd_t * dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static unsigned long __srmmu_get_nocache(int size, int align)
{
	int offset;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x too small for nocache request\n", size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT-1)) {
		printk("Size 0x%x unaligned in nocache request\n", size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT-1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk("srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	return (SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT));
}
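/*
 * Note: callers are expected to request a power-of-2 size and to use the
 * size as the alignment as well, e.g. __srmmu_get_nocache(PTE_SIZE,
 * PTE_SIZE); srmmu_free_nocache() enforces exactly that on release.
 */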

unsigned long srmmu_get_nocache(int size, int align)
{
	unsigned long tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset((void *)tmp, 0, size);

	return tmp;
}

void srmmu_free_nocache(unsigned long vaddr, int size)
{
	int offset;

	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr+size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		       vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size-1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
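/*
 * Rough example of the sizing below (assuming SRMMU_NOCACHE_ALCRATIO is
 * the usual 64, i.e. 256 nocache pages per 64 MB of RAM): a 64 MB machine
 * gets about 256 pages (1 MB) of nocache, clamped between
 * SRMMU_MIN_NOCACHE_PAGES and SRMMU_MAX_NOCACHE_PAGES.
 */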
static void srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}

static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

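/*
 * Allocate a pgd from the nocache pool, clear its user entries and copy
 * the kernel entries from the reference pgd (pgd_offset_k(0)).
 */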
pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = (pgd_t *)__srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
				(PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
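/*
 * Note: the pte page itself comes from pte_alloc_one_kernel(), i.e. from
 * the nocache pool; the struct page returned here is recovered from that
 * allocation's physical address.
 */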
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page( __nocache_pa(pte) >> PAGE_SHIFT );
	pgtable_page_ctor(page);
	return page;
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */
	p = (unsigned long) __nocache_va(p);	/* Nocached virtual */
	srmmu_free_nocache(p, PTE_SIZE);
}

/*
 */
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if(ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if(ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if(ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}


void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if(mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/*
	 * I need to test whether this is consistent over all
	 * sun4m's. The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/*
 * On the SRMMU we do not have the problems with limited tlb entries
 * for mapping kernel pages, so we just take things from the free page
 * pool. As a side effect we are putting a little too much pressure
 * on the gfp() subsystem. This setup also makes the logic of the
 * iommu mapping code a lot easier as we can transparently handle
 * mappings on the kernel stack without any special code.
 */
struct thread_info *alloc_thread_info_node(struct task_struct *tsk, int node)
{
	struct thread_info *ret;

	ret = (struct thread_info *)__get_free_pages(GFP_KERNEL,
						     THREAD_INFO_ORDER);
#ifdef CONFIG_DEBUG_STACK_USAGE
	if (ret)
		memset(ret, 0, PAGE_SIZE << THREAD_INFO_ORDER);
#endif /* DEBUG_STACK_USAGE */

	return ret;
}

void free_thread_info(struct thread_info *ti)
{
	free_pages((unsigned long)ti, THREAD_INFO_ORDER);
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration. On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 * kernel mappings are done with one single contiguous chunk of
 * ram. On small ram machines (classics mainly) we only get
 * around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *) __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *)__srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while(start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if(srmmu_pmd_none(*pmdp)) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do. Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while(start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if(start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if(!(prompte = srmmu_hwprobe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if(!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if(!(start & ~(SRMMU_PGDIR_MASK))) {
			if(srmmu_hwprobe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			    prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if(what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = (pmd_t *)__srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if(srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = (pte_t *) __srmmu_get_nocache(PTE_SIZE,
							     PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if(what == 1) {
			/*
			 * We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while(vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static inline void memprobe_error(char *msg)
{
	prom_printf(msg);
	prom_printf("Halting now...\n");
	prom_halt();
}

static inline void map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while(cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if(!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if(!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000,(LINUX_OPPROM_ENDVM-PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = (ctxd_t *)__srmmu_get_nocache(num_contexts*sizeof(ctxd_t), num_contexts*sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for(i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = pmd_offset(pgd, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

void destroy_context(struct mm_struct *mm)
{

	if(mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if(!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, "
					    "halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if(vac_cache_size > max_size)
				max_size = vac_cache_size;
			if(vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if(nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static const struct sparc32_cachetlb_ops hypersparc_ops = {
	.cache_all	= hypersparc_flush_cache_all,
	.cache_mm	= hypersparc_flush_cache_mm,
	.cache_page	= hypersparc_flush_cache_page,
	.cache_range	= hypersparc_flush_cache_range,
	.tlb_all	= hypersparc_flush_tlb_all,
	.tlb_mm		= hypersparc_flush_tlb_mm,
	.tlb_page	= hypersparc_flush_tlb_page,
	.tlb_range	= hypersparc_flush_tlb_range,
	.page_to_ram	= hypersparc_flush_page_to_ram,
	.sig_insns	= hypersparc_flush_sig_insns,
	.page_for_dma	= hypersparc_flush_page_for_dma,
};

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;
	sparc32_cachetlb_ops = &hypersparc_ops;

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken. At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code). So you see things like crashme completely
	 * hosing your machine which is completely unacceptable. Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops swift_ops = {
	.cache_all	= swift_flush_cache_all,
	.cache_mm	= swift_flush_cache_mm,
	.cache_page	= swift_flush_cache_page,
	.cache_range	= swift_flush_cache_range,
	.tlb_all	= swift_flush_tlb_all,
	.tlb_mm		= swift_flush_tlb_mm,
	.tlb_page	= swift_flush_tlb_page,
	.tlb_range	= swift_flush_tlb_range,
	.page_to_ram	= swift_flush_page_to_ram,
	.sig_insns	= swift_flush_sig_insns,
	.page_for_dma	= swift_flush_page_for_dma,
};

#define SWIFT_MASKID_ADDR 0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch(swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here. However I think we can find a way to avoid
		 * all of the workaround overhead under Linux. Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes). Aha, sounds pretty
		 * horrible eh? But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all. This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	sparc32_cachetlb_ops = &swift_ops;
	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time? Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people. I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_hwprobe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}


static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg (ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops turbosparc_ops = {
	.cache_all	= turbosparc_flush_cache_all,
	.cache_mm	= turbosparc_flush_cache_mm,
	.cache_page	= turbosparc_flush_cache_page,
	.cache_range	= turbosparc_flush_cache_range,
	.tlb_all	= turbosparc_flush_tlb_all,
	.tlb_mm		= turbosparc_flush_tlb_mm,
	.tlb_page	= turbosparc_flush_tlb_page,
	.tlb_range	= turbosparc_flush_tlb_range,
	.page_to_ram	= turbosparc_flush_page_to_ram,
	.sig_insns	= turbosparc_flush_sig_insns,
	.page_for_dma	= turbosparc_flush_page_for_dma,
};

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;
	sparc32_cachetlb_ops = &turbosparc_ops;
	poke_srmmu = poke_turbosparc;
}

static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops tsunami_ops = {
	.cache_all	= tsunami_flush_cache_all,
	.cache_mm	= tsunami_flush_cache_mm,
	.cache_page	= tsunami_flush_cache_page,
	.cache_range	= tsunami_flush_cache_range,
	.tlb_all	= tsunami_flush_tlb_all,
	.tlb_mm		= tsunami_flush_tlb_mm,
	.tlb_page	= tsunami_flush_tlb_page,
	.tlb_range	= tsunami_flush_tlb_range,
	.page_to_ram	= tsunami_flush_page_to_ram,
	.sig_insns	= tsunami_flush_sig_insns,
	.page_for_dma	= tsunami_flush_page_for_dma,
};

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time. Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;
	sparc32_cachetlb_ops = &tsunami_ops;
	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if(smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

static struct sparc32_cachetlb_ops viking_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= viking_flush_tlb_all,
	.tlb_mm		= viking_flush_tlb_mm,
	.tlb_page	= viking_flush_tlb_page,
	.tlb_range	= viking_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};

#ifdef CONFIG_SMP
/* On sun4d the cpu broadcasts local TLB flushes, so we can just
 * perform the local TLB flush and all the other cpus will see it.
 * But, unfortunately, there is a bug in the sun4d XBUS backplane
 * that requires that we add some synchronization to these flushes.
 *
 * The bug is that the fifo which keeps track of all the pending TLB
 * broadcasts in the system is an entry or two too small, so if we
 * have too many going at once we'll overflow that fifo and lose a TLB
 * flush resulting in corruption.
 *
 * Our workaround is to take a global spinlock around the TLB flushes,
 * which guarantees we won't ever have too many pending. It's a big
 * hammer, but a semaphore like system to make sure we only have N TLB
 * flushes going at once will require SMP locking anyways so there's
 * no real value in trying any harder than this.
 */
static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
	.cache_all	= viking_flush_cache_all,
	.cache_mm	= viking_flush_cache_mm,
	.cache_page	= viking_flush_cache_page,
	.cache_range	= viking_flush_cache_range,
	.tlb_all	= sun4dsmp_flush_tlb_all,
	.tlb_mm		= sun4dsmp_flush_tlb_mm,
	.tlb_page	= sun4dsmp_flush_tlb_page,
	.tlb_range	= sun4dsmp_flush_tlb_range,
	.page_to_ram	= viking_flush_page_to_ram,
	.sig_insns	= viking_flush_sig_insns,
	.page_for_dma	= viking_flush_page_for_dma,
};
#endif

static void __init init_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	/* Ahhh, the viking. SRMMU VLSI abortion number two... */
	if(mreg & VIKING_MMODE) {
		srmmu_name = "TI Viking";
		viking_mxcc_present = 0;
		msi_set_sync();

		/*
		 * We need this to make sure old viking takes no hits
		 * on its cache for dma snoops to work around the
1456 * "load from non-cacheable memory" interrupt bug.
1457 * This is only necessary because of the new way in
1458 * which we use the IOMMU.
1459 */
David S. Miller5d83d662012-05-13 20:49:31 -07001460 viking_ops.page_for_dma = viking_flush_page;
1461#ifdef CONFIG_SMP
1462 viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
1463#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001464 flush_page_for_dma_global = 0;
1465 } else {
1466 srmmu_name = "TI Viking/MXCC";
1467 viking_mxcc_present = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001468 srmmu_cache_pagetables = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001469 }
1470
David S. Miller5d83d662012-05-13 20:49:31 -07001471 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1472 &viking_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001473#ifdef CONFIG_SMP
David S. Miller5d83d662012-05-13 20:49:31 -07001474 if (sparc_cpu_model == sun4d)
1475 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1476 &viking_sun4d_smp_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001477#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001478
1479 poke_srmmu = poke_viking;
1480}
1481
Konrad Eisele75d9e342009-08-17 00:13:33 +00001482#ifdef CONFIG_SPARC_LEON
static void leon_flush_cache_mm(struct mm_struct *mm)
{
	leon_flush_cache_all();
}

static void leon_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	leon_flush_pcache_all(vma, page);
}

static void leon_flush_cache_range(struct vm_area_struct *vma,
				   unsigned long start,
				   unsigned long end)
{
	leon_flush_cache_all();
}

static void leon_flush_tlb_mm(struct mm_struct *mm)
{
	leon_flush_tlb_all();
}

static void leon_flush_tlb_page(struct vm_area_struct *vma,
				unsigned long page)
{
	leon_flush_tlb_all();
}

static void leon_flush_tlb_range(struct vm_area_struct *vma,
				 unsigned long start,
				 unsigned long end)
{
	leon_flush_tlb_all();
}

static void leon_flush_page_to_ram(unsigned long page)
{
	leon_flush_cache_all();
}

static void leon_flush_sig_insns(struct mm_struct *mm, unsigned long page)
{
	leon_flush_cache_all();
}

static void leon_flush_page_for_dma(unsigned long page)
{
	leon_flush_dcache_all();
}

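/* No per-CPU MMU setup is needed on LEON, so this poke routine is empty. */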
void __init poke_leonsparc(void)
{
}

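/* Wire the LEON helpers into the generic sparc32 cache/TLB interface. */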
static const struct sparc32_cachetlb_ops leon_ops = {
	.cache_all = leon_flush_cache_all,
	.cache_mm = leon_flush_cache_mm,
	.cache_page = leon_flush_cache_page,
	.cache_range = leon_flush_cache_range,
	.tlb_all = leon_flush_tlb_all,
	.tlb_mm = leon_flush_tlb_mm,
	.tlb_page = leon_flush_tlb_page,
	.tlb_range = leon_flush_tlb_range,
	.page_to_ram = leon_flush_page_to_ram,
	.sig_insns = leon_flush_sig_insns,
	.page_for_dma = leon_flush_page_for_dma,
};

void __init init_leon(void)
{
	srmmu_name = "LEON";
	sparc32_cachetlb_ops = &leon_ops;
	poke_srmmu = poke_leonsparc;

	srmmu_cache_pagetables = 0;

	leon_flush_during_switch = leon_flush_needed();
}
#endif

/* Probe for the srmmu chip version. */
static void __init get_srmmu_type(void)
{
	unsigned long mreg, psr;
	unsigned long mod_typ, mod_rev, psr_typ, psr_vers;

	srmmu_modtype = SRMMU_INVAL_MOD;
	hwbug_bitmask = 0;

	mreg = srmmu_get_mmureg();
	psr = get_psr();
	mod_typ = (mreg & 0xf0000000) >> 28;
	mod_rev = (mreg & 0x0f000000) >> 24;
	psr_typ = (psr >> 28) & 0xf;
	psr_vers = (psr >> 24) & 0xf;

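	/*
	 * Bits 31:28 of the MMU control register and of the PSR hold the
	 * implementation field, bits 27:24 the version/revision field;
	 * the chip is identified from these four values.
	 */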
	/* First, check for sparc-leon. */
	if (sparc_cpu_model == sparc_leon) {
		init_leon();
		return;
	}

	/* Second, check for HyperSparc or Cypress. */
	if (mod_typ == 1) {
		switch (mod_rev) {
		case 7:
			/* UP or MP Hypersparc */
			init_hypersparc();
			break;
		case 0:
		case 2:
		case 10:
		case 11:
		case 12:
		case 13:
		case 14:
		case 15:
		default:
			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
			prom_halt();
			break;
		}
		return;
	}

	/*
	 * Now Fujitsu TurboSparc. It might happen that it is
	 * in Swift emulation mode, so we will check later...
	 */
	if (psr_typ == 0 && psr_vers == 5) {
		init_turbosparc();
		return;
	}

	/* Next check for Fujitsu Swift. */
	if (psr_typ == 0 && psr_vers == 4) {
		phandle cpunode;
		char node_str[128];

		/* Check whether it is really a TurboSparc emulating a Swift... */
		cpunode = prom_getchild(prom_root_node);
		while ((cpunode = prom_getsibling(cpunode)) != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
				    prom_getintdefault(cpunode, "psr-version", 1) == 5) {
					init_turbosparc();
					return;
				}
				break;
			}
		}

		init_swift();
		return;
	}

	/* Now the Viking family of srmmu. */
	if (psr_typ == 4 &&
	    ((psr_vers == 0) ||
	     ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
		init_viking();
		return;
	}

	/* Finally the Tsunami. */
	if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
		init_tsunami();
		return;
	}

	/* Oh well */
	srmmu_is_bad();
}

#ifdef CONFIG_SMP
/* Local cross-calls. */
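/*
 * Each wrapper below broadcasts the operation to the other CPUs with
 * xc0()..xc3() (cross-calls taking 0..3 arguments) and then performs it
 * locally through local_ops, which load_mmu() points at the chip-specific
 * ops table.
 */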
static void smp_flush_page_for_dma(unsigned long page)
{
	xc1((smpfunc_t) local_ops->page_for_dma, page);
	local_ops->page_for_dma(page);
}

static void smp_flush_cache_all(void)
{
	xc0((smpfunc_t) local_ops->cache_all);
	local_ops->cache_all();
}

static void smp_flush_tlb_all(void)
{
	xc0((smpfunc_t) local_ops->tlb_all);
	local_ops->tlb_all();
}

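/*
 * The per-mm operations below follow one pattern: copy the mm's CPU mask,
 * drop the local CPU from it, cross-call any remaining CPUs, then run the
 * flush locally.
 */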
static void smp_flush_cache_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
		local_ops->cache_mm(mm);
	}
}

static void smp_flush_tlb_mm(struct mm_struct *mm)
{
	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask)) {
			xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
			if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
				cpumask_copy(mm_cpumask(mm),
					     cpumask_of(smp_processor_id()));
		}
		local_ops->tlb_mm(mm);
	}
}

static void smp_flush_cache_range(struct vm_area_struct *vma,
				  unsigned long start,
				  unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->cache_range,
			    (unsigned long) vma, start, end);
		local_ops->cache_range(vma, start, end);
	}
}

static void smp_flush_tlb_range(struct vm_area_struct *vma,
				unsigned long start,
				unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc3((smpfunc_t) local_ops->tlb_range,
			    (unsigned long) vma, start, end);
		local_ops->tlb_range(vma, start, end);
	}
}

static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->cache_page,
			    (unsigned long) vma, page);
		local_ops->cache_page(vma, page);
	}
}

static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	struct mm_struct *mm = vma->vm_mm;

	if (mm->context != NO_CONTEXT) {
		cpumask_t cpu_mask;
		cpumask_copy(&cpu_mask, mm_cpumask(mm));
		cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
		if (!cpumask_empty(&cpu_mask))
			xc2((smpfunc_t) local_ops->tlb_page,
			    (unsigned long) vma, page);
		local_ops->tlb_page(vma, page);
	}
}

static void smp_flush_page_to_ram(unsigned long page)
{
	/* Current theory is that those who call this are the ones
	 * who have just dirtied their cache with the page's contents
	 * in kernel space, therefore we only run this on the local cpu.
	 *
	 * XXX This experiment failed, research further... -DaveM
	 */
#if 1
	xc1((smpfunc_t) local_ops->page_to_ram, page);
#endif
	local_ops->page_to_ram(page);
}

static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
	cpumask_t cpu_mask;
	cpumask_copy(&cpu_mask, mm_cpumask(mm));
	cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
	if (!cpumask_empty(&cpu_mask))
		xc2((smpfunc_t) local_ops->sig_insns,
		    (unsigned long) mm, insn_addr);
	local_ops->sig_insns(mm, insn_addr);
}

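/* Cross-call wrappers installed as sparc32_cachetlb_ops by load_mmu() on SMP. */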
static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
	.cache_all = smp_flush_cache_all,
	.cache_mm = smp_flush_cache_mm,
	.cache_page = smp_flush_cache_page,
	.cache_range = smp_flush_cache_range,
	.tlb_all = smp_flush_tlb_all,
	.tlb_mm = smp_flush_tlb_mm,
	.tlb_page = smp_flush_tlb_page,
	.tlb_range = smp_flush_tlb_range,
	.page_to_ram = smp_flush_page_to_ram,
	.sig_insns = smp_flush_sig_insns,
	.page_for_dma = smp_flush_page_for_dma,
};
#endif

/* Load up routines and constants for sun4m and sun4d mmu */
void __init load_mmu(void)
{
	extern void ld_mmu_iommu(void);
	extern void ld_mmu_iounit(void);

	/* Functions */
	get_srmmu_type();

#ifdef CONFIG_SMP
	/* El switcheroo... */
	local_ops = sparc32_cachetlb_ops;

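	/*
	 * On sun4d and LEON the chip-specific TLB flush routines are kept
	 * as-is; only the remaining operations are routed through the
	 * cross-call wrappers.
	 */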
	if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
		smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
		smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
		smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
		smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
	}

	if (poke_srmmu == poke_viking) {
		/* Avoid unnecessary cross calls. */
		smp_cachetlb_ops.cache_all = local_ops->cache_all;
		smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
		smp_cachetlb_ops.cache_range = local_ops->cache_range;
		smp_cachetlb_ops.cache_page = local_ops->cache_page;

		smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
		smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
		smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
	}

	/* It really is const after this point. */
	sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
		&smp_cachetlb_ops;
#endif

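	/* sun4d machines drive I/O through the IO-unit; everything else uses the IOMMU. */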
	if (sparc_cpu_model == sun4d)
		ld_mmu_iounit();
	else
		ld_mmu_iommu();
#ifdef CONFIG_SMP
	if (sparc_cpu_model == sun4d)
		sun4d_init_smp();
	else if (sparc_cpu_model == sparc_leon)
		leon_init_smp();
	else
		sun4m_init_smp();
#endif
}