/*
 * srmmu.c: SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/bitext.h>
#include <asm/vaddrs.h>
#include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h>
#include <asm/mbus.h>
#include <asm/page.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/smp.h>
#include <asm/io.h>

/* Now the cpu specific definitions. */
#include <asm/turbosparc.h>
#include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h>
#include <asm/leon.h>
#include <asm/mxcc.h>
#include <asm/ross.h>

#include "srmmu.h"

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;

#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif
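
/*
 * On UP builds FLUSH_BEGIN/FLUSH_END make the per-mm flush helpers below
 * skip their work for address spaces that currently have no hardware
 * context (mm->context == NO_CONTEXT); on SMP the macros expand to
 * nothing and the flush body always runs.
 */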

int flush_page_for_dma_global = 1;

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

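/*
 * A pmd_t holds PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE hardware page-table
 * pointers (PTDs); pmd_set()/pmd_populate() fill them so they reference
 * consecutive SRMMU_REAL_PTRS_PER_PTE-entry slices of one Linux pte page
 * (physical addresses are stored shifted right by 4, as the PTD format
 * requires).
 */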
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

/* Find an entry in the third-level page table.. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static void *__srmmu_get_nocache(int size, int align)
{
	int offset;
	unsigned long addr;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk(KERN_ERR "Size 0x%x too small for nocache request\n",
		       size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
		printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
		       size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return 0;
	}

	addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
	return (void *)addr;
}
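
/*
 * One bit in srmmu_nocache_map covers 2^SRMMU_NOCACHE_BITMAP_SHIFT (256)
 * bytes of the pool, so bitmap offsets translate to virtual addresses by
 * a simple shift from SRMMU_NOCACHE_VADDR, and sizes and alignments are
 * handed to bit_map_string_get() in the same 256-byte units.
 */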

void *srmmu_get_nocache(int size, int align)
{
	void *tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset(tmp, 0, size);

	return tmp;
}

void srmmu_free_nocache(void *addr, int size)
{
	unsigned long vaddr;
	int offset;

	vaddr = (unsigned long)addr;
	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		       vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr + size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		       vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size - 1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically proportionally to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void __init srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}

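/*
 * The nocache pool is carved out of bootmem and remapped at
 * SRMMU_NOCACHE_VADDR with privileged PTEs; the mapping stays
 * uncacheable unless srmmu_cache_pagetables is set, so allocations
 * from it (page tables, the context table) bypass the data cache.
 */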
static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
	pgtable_page_ctor(page);
	return page;
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */

	/* free non cached virtual address */
	srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
}

/*
 * Allocate a hardware context for mm: take one from the free list if
 * available, otherwise steal an entry from the used list (skipping
 * old_mm), flushing the victim's cache and TLB state first.
 */
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}


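/*
 * A context is assigned to an mm lazily, on the first switch_mm() into
 * it; the pgd is then hooked into srmmu_context_table so the MMU can
 * walk it once srmmu_set_context() selects that context number.
 */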
void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if (mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0  /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = __srmmu_get_nocache(PTE_SIZE,
						   PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/* These flush types are not available on all chips... */
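/*
 * On everything except LEON the lda from ASI_M_FLUSH_PROBE asks the MMU
 * itself to walk the page tables for vaddr (the 0x400 in the address
 * selects the probe variant used here); LEON instead goes through the
 * software table walker leon_swprobe().
 */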
static inline unsigned long srmmu_probe(unsigned long vaddr)
{
	unsigned long retval;

	if (sparc_cpu_model != sparc_leon) {

		vaddr &= PAGE_MASK;
		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
				     "=r" (retval) :
				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	} else {
		retval = leon_swprobe(vaddr, 0);
	}
	return retval;
}

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	unsigned long probed;
	unsigned long addr;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what;	/* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */

	while (start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		probed = srmmu_probe(start);
		if (!probed) {
			/* continue probing until we find an entry */
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;
		addr = start - PAGE_SIZE;

		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if (srmmu_probe(addr + SRMMU_REAL_PMD_SIZE) == probed)
				what = 1;
		}

		if (!(start & ~(SRMMU_PGDIR_MASK))) {
			if (srmmu_probe(addr + SRMMU_PGDIR_SIZE) == probed)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(probed);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE,
						   SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (what == 1) {
			/* We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			unsigned long *val;
			x = (start >> PMD_SHIFT) & 15;
			val = &pmdp->pmdv[x];
			*(unsigned long *)__nocache_fix(val) = probed;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(probed);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static void __init map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = pmd_offset(pgd, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

	{
		unsigned long zones_size[MAX_NR_ZONES];
		unsigned long zholes_size[MAX_NR_ZONES];
		unsigned long npages;
		int znum;

		for (znum = 0; znum < MAX_NR_ZONES; znum++)
			zones_size[znum] = zholes_size[znum] = 0;

		npages = max_low_pfn - pfn_base;

		zones_size[ZONE_DMA] = npages;
		zholes_size[ZONE_DMA] = npages - pages_avail;

		npages = highend_pfn - max_low_pfn;
		zones_size[ZONE_HIGHMEM] = npages;
		zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();

		free_area_init_node(0, zones_size, pfn_base, zholes_size);
	}
}

void mmu_info(struct seq_file *m)
{
	seq_printf(m,
		   "MMU type\t: %s\n"
		   "contexts\t: %d\n"
		   "nocache total\t: %ld\n"
		   "nocache used\t: %d\n",
		   srmmu_name,
		   num_contexts,
		   srmmu_nocache_size,
		   srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
}

void destroy_context(struct mm_struct *mm)
{

	if (mm->context != NO_CONTEXT) {
		flush_cache_mm(mm);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
		flush_tlb_mm(mm);
		spin_lock(&srmmu_context_spinlock);
		free_context(mm->context);
		spin_unlock(&srmmu_context_spinlock);
		mm->context = NO_CONTEXT;
	}
}

/* Init various srmmu chip types. */
static void __init srmmu_is_bad(void)
{
	prom_printf("Could not determine SRMMU chip type.\n");
	prom_halt();
}

static void __init init_vac_layout(void)
{
	phandle nd;
	int cache_lines;
	char node_str[128];
#ifdef CONFIG_SMP
	int cpu = 0;
	unsigned long max_size = 0;
	unsigned long min_line_size = 0x10000000;
#endif

	nd = prom_getchild(prom_root_node);
	while ((nd = prom_getsibling(nd)) != 0) {
		prom_getstring(nd, "device_type", node_str, sizeof(node_str));
		if (!strcmp(node_str, "cpu")) {
			vac_line_size = prom_getint(nd, "cache-line-size");
			if (vac_line_size == -1) {
				prom_printf("can't determine cache-line-size, halting.\n");
				prom_halt();
			}
			cache_lines = prom_getint(nd, "cache-nlines");
			if (cache_lines == -1) {
				prom_printf("can't determine cache-nlines, halting.\n");
				prom_halt();
			}

			vac_cache_size = cache_lines * vac_line_size;
#ifdef CONFIG_SMP
			if (vac_cache_size > max_size)
				max_size = vac_cache_size;
			if (vac_line_size < min_line_size)
				min_line_size = vac_line_size;
			//FIXME: cpus not contiguous!!
			cpu++;
			if (cpu >= nr_cpu_ids || !cpu_online(cpu))
				break;
#else
			break;
#endif
		}
	}
	if (nd == 0) {
		prom_printf("No CPU nodes found, halting.\n");
		prom_halt();
	}
#ifdef CONFIG_SMP
	vac_cache_size = max_size;
	vac_line_size = min_line_size;
#endif
	printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
	       (int)vac_cache_size, (int)vac_line_size);
}

static void __cpuinit poke_hypersparc(void)
{
	volatile unsigned long clear;
	unsigned long mreg = srmmu_get_mmureg();

	hyper_flush_unconditional_combined();

	mreg &= ~(HYPERSPARC_CWENABLE);
	mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
	mreg |= (HYPERSPARC_CMODE);

	srmmu_set_mmureg(mreg);

#if 0 /* XXX I think this is bad news... -DaveM */
	hyper_clear_all_tags();
#endif

	put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
	hyper_flush_whole_icache();
	clear = srmmu_get_faddr();
	clear = srmmu_get_fstatus();
}

static const struct sparc32_cachetlb_ops hypersparc_ops = {
	.cache_all = hypersparc_flush_cache_all,
	.cache_mm = hypersparc_flush_cache_mm,
	.cache_page = hypersparc_flush_cache_page,
	.cache_range = hypersparc_flush_cache_range,
	.tlb_all = hypersparc_flush_tlb_all,
	.tlb_mm = hypersparc_flush_tlb_mm,
	.tlb_page = hypersparc_flush_tlb_page,
	.tlb_range = hypersparc_flush_tlb_range,
	.page_to_ram = hypersparc_flush_page_to_ram,
	.sig_insns = hypersparc_flush_sig_insns,
	.page_for_dma = hypersparc_flush_page_for_dma,
};

static void __init init_hypersparc(void)
{
	srmmu_name = "ROSS HyperSparc";
	srmmu_modtype = HyperSparc;

	init_vac_layout();

	is_hypersparc = 1;
	sparc32_cachetlb_ops = &hypersparc_ops;

	poke_srmmu = poke_hypersparc;

	hypersparc_setup_blockops();
}

static void __cpuinit poke_swift(void)
{
	unsigned long mreg;

	/* Clear any crap from the cache or else... */
	swift_flush_cache_all();

	/* Enable I & D caches */
	mreg = srmmu_get_mmureg();
	mreg |= (SWIFT_IE | SWIFT_DE);
	/*
	 * The Swift branch folding logic is completely broken.  At
	 * trap time, if things are just right, it can mistakenly
	 * think that a trap is coming from kernel mode when in fact
	 * it is coming from user mode (it mis-executes the branch in
	 * the trap code).  So you see things like crashme completely
	 * hosing your machine which is completely unacceptable.  Turn
	 * this shit off... nice job Fujitsu.
	 */
	mreg &= ~(SWIFT_BF);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops swift_ops = {
	.cache_all = swift_flush_cache_all,
	.cache_mm = swift_flush_cache_mm,
	.cache_page = swift_flush_cache_page,
	.cache_range = swift_flush_cache_range,
	.tlb_all = swift_flush_tlb_all,
	.tlb_mm = swift_flush_tlb_mm,
	.tlb_page = swift_flush_tlb_page,
	.tlb_range = swift_flush_tlb_range,
	.page_to_ram = swift_flush_page_to_ram,
	.sig_insns = swift_flush_sig_insns,
	.page_for_dma = swift_flush_page_for_dma,
};

#define SWIFT_MASKID_ADDR 0x10003018
static void __init init_swift(void)
{
	unsigned long swift_rev;

	__asm__ __volatile__("lda [%1] %2, %0\n\t"
			     "srl %0, 0x18, %0\n\t" :
			     "=r" (swift_rev) :
			     "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
	srmmu_name = "Fujitsu Swift";
	switch (swift_rev) {
	case 0x11:
	case 0x20:
	case 0x23:
	case 0x30:
		srmmu_modtype = Swift_lots_o_bugs;
		hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
		/*
		 * Gee george, I wonder why Sun is so hush hush about
		 * this hardware bug... really braindamage stuff going
		 * on here.  However I think we can find a way to avoid
		 * all of the workaround overhead under Linux.  Basically,
		 * any page fault can cause kernel pages to become user
		 * accessible (the mmu gets confused and clears some of
		 * the ACC bits in kernel ptes).  Aha, sounds pretty
		 * horrible eh?  But wait, after extensive testing it appears
		 * that if you use pgd_t level large kernel pte's (like the
		 * 4MB pages on the Pentium) the bug does not get tripped
		 * at all.  This avoids almost all of the major overhead.
		 * Welcome to a world where your vendor tells you to,
		 * "apply this kernel patch" instead of "sorry for the
		 * broken hardware, send it back and we'll give you
		 * properly functioning parts"
		 */
		break;
	case 0x25:
	case 0x31:
		srmmu_modtype = Swift_bad_c;
		hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
		/*
		 * You see Sun allude to this hardware bug but never
		 * admit things directly, they'll say things like,
		 * "the Swift chip cache problems" or similar.
		 */
		break;
	default:
		srmmu_modtype = Swift_ok;
		break;
	}

	sparc32_cachetlb_ops = &swift_ops;
	flush_page_for_dma_global = 0;

	/*
	 * Are you now convinced that the Swift is one of the
	 * biggest VLSI abortions of all time?  Bravo Fujitsu!
	 * Fujitsu, the !#?!%$'d up processor people.  I bet if
	 * you examined the microcode of the Swift you'd find
	 * XXX's all over the place.
	 */
	poke_srmmu = poke_swift;
}

static void turbosparc_flush_cache_all(void)
{
	flush_user_windows();
	turbosparc_idflash_clear();
}

static void turbosparc_flush_cache_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	turbosparc_idflash_clear();
	FLUSH_END
}

static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	flush_user_windows();
	if (vma->vm_flags & VM_EXEC)
		turbosparc_flush_icache();
	turbosparc_flush_dcache();
	FLUSH_END
}

/* TurboSparc is copy-back, if we turn it on, but this does not work. */
static void turbosparc_flush_page_to_ram(unsigned long page)
{
#ifdef TURBOSPARC_WRITEBACK
	volatile unsigned long clear;

	if (srmmu_probe(page))
		turbosparc_flush_page_cache(page);
	clear = srmmu_get_fstatus();
#endif
}

static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
{
}

static void turbosparc_flush_page_for_dma(unsigned long page)
{
	turbosparc_flush_dcache();
}

static void turbosparc_flush_tlb_all(void)
{
	srmmu_flush_whole_tlb();
}

static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
{
	FLUSH_BEGIN(mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}

static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	FLUSH_BEGIN(vma->vm_mm)
	srmmu_flush_whole_tlb();
	FLUSH_END
}


static void __cpuinit poke_turbosparc(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	unsigned long ccreg;

	/* Clear any crap from the cache or else... */
	turbosparc_flush_cache_all();
	/* Temporarily disable I & D caches */
	mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
	mreg &= ~(TURBOSPARC_PCENABLE);		/* Don't check parity */
	srmmu_set_mmureg(mreg);

	ccreg = turbosparc_get_ccreg();

#ifdef TURBOSPARC_WRITEBACK
	ccreg |= (TURBOSPARC_SNENABLE);		/* Do DVMA snooping in Dcache */
	ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
			/* Write-back D-cache, emulate VLSI
			 * abortion number three, not number one */
#else
	/* For now let's play safe, optimize later */
	ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
			/* Do DVMA snooping in Dcache, Write-thru D-cache */
	ccreg &= ~(TURBOSPARC_uS2);
			/* Emulate VLSI abortion number three, not number one */
#endif

	switch (ccreg & 7) {
	case 0: /* No SE cache */
	case 7: /* Test mode */
		break;
	default:
		ccreg |= (TURBOSPARC_SCENABLE);
	}
	turbosparc_set_ccreg(ccreg);

	mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
	mreg |= (TURBOSPARC_ICSNOOP);		/* Icache snooping on */
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops turbosparc_ops = {
	.cache_all = turbosparc_flush_cache_all,
	.cache_mm = turbosparc_flush_cache_mm,
	.cache_page = turbosparc_flush_cache_page,
	.cache_range = turbosparc_flush_cache_range,
	.tlb_all = turbosparc_flush_tlb_all,
	.tlb_mm = turbosparc_flush_tlb_mm,
	.tlb_page = turbosparc_flush_tlb_page,
	.tlb_range = turbosparc_flush_tlb_range,
	.page_to_ram = turbosparc_flush_page_to_ram,
	.sig_insns = turbosparc_flush_sig_insns,
	.page_for_dma = turbosparc_flush_page_for_dma,
};

static void __init init_turbosparc(void)
{
	srmmu_name = "Fujitsu TurboSparc";
	srmmu_modtype = TurboSparc;
	sparc32_cachetlb_ops = &turbosparc_ops;
	poke_srmmu = poke_turbosparc;
}

static void __cpuinit poke_tsunami(void)
{
	unsigned long mreg = srmmu_get_mmureg();

	tsunami_flush_icache();
	tsunami_flush_dcache();
	mreg &= ~TSUNAMI_ITD;
	mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
	srmmu_set_mmureg(mreg);
}

static const struct sparc32_cachetlb_ops tsunami_ops = {
	.cache_all = tsunami_flush_cache_all,
	.cache_mm = tsunami_flush_cache_mm,
	.cache_page = tsunami_flush_cache_page,
	.cache_range = tsunami_flush_cache_range,
	.tlb_all = tsunami_flush_tlb_all,
	.tlb_mm = tsunami_flush_tlb_mm,
	.tlb_page = tsunami_flush_tlb_page,
	.tlb_range = tsunami_flush_tlb_range,
	.page_to_ram = tsunami_flush_page_to_ram,
	.sig_insns = tsunami_flush_sig_insns,
	.page_for_dma = tsunami_flush_page_for_dma,
};

static void __init init_tsunami(void)
{
	/*
	 * Tsunami's pretty sane, Sun and TI actually got it
	 * somewhat right this time.  Fujitsu should have
	 * taken some lessons from them.
	 */

	srmmu_name = "TI Tsunami";
	srmmu_modtype = Tsunami;
	sparc32_cachetlb_ops = &tsunami_ops;
	poke_srmmu = poke_tsunami;

	tsunami_setup_blockops();
}

static void __cpuinit poke_viking(void)
{
	unsigned long mreg = srmmu_get_mmureg();
	static int smp_catch;

	if (viking_mxcc_present) {
		unsigned long mxcc_control = mxcc_get_creg();

		mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
		mxcc_control &= ~(MXCC_CTL_RRC);
		mxcc_set_creg(mxcc_control);

		/*
		 * We don't need memory parity checks.
		 * XXX This is a mess, have to dig out later. ecd.
		viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
		 */

		/* We do cache ptables on MXCC. */
		mreg |= VIKING_TCENABLE;
	} else {
		unsigned long bpreg;

		mreg &= ~(VIKING_TCENABLE);
		if (smp_catch++) {
			/* Must disable mixed-cmd mode here for other cpu's. */
			bpreg = viking_get_bpreg();
			bpreg &= ~(VIKING_ACTION_MIX);
			viking_set_bpreg(bpreg);

			/* Just in case PROM does something funny. */
			msi_set_sync();
		}
	}

	mreg |= VIKING_SPENABLE;
	mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
	mreg |= VIKING_SBENABLE;
	mreg &= ~(VIKING_ACENABLE);
	srmmu_set_mmureg(mreg);
}

static struct sparc32_cachetlb_ops viking_ops = {
	.cache_all = viking_flush_cache_all,
	.cache_mm = viking_flush_cache_mm,
	.cache_page = viking_flush_cache_page,
	.cache_range = viking_flush_cache_range,
	.tlb_all = viking_flush_tlb_all,
	.tlb_mm = viking_flush_tlb_mm,
	.tlb_page = viking_flush_tlb_page,
	.tlb_range = viking_flush_tlb_range,
	.page_to_ram = viking_flush_page_to_ram,
	.sig_insns = viking_flush_sig_insns,
	.page_for_dma = viking_flush_page_for_dma,
};

#ifdef CONFIG_SMP
/* On sun4d the cpu broadcasts local TLB flushes, so we can just
 * perform the local TLB flush and all the other cpus will see it.
 * But, unfortunately, there is a bug in the sun4d XBUS backplane
 * that requires that we add some synchronization to these flushes.
 *
 * The bug is that the fifo which keeps track of all the pending TLB
 * broadcasts in the system is an entry or two too small, so if we
 * have too many going at once we'll overflow that fifo and lose a TLB
 * flush resulting in corruption.
 *
 * Our workaround is to take a global spinlock around the TLB flushes,
1399 * which guarantees we won't ever have too many pending.  It's a big
1400 * hammer, but a semaphore-like system to make sure we only have N TLB
1401 * flushes going at once would require SMP locking anyway, so there's
1402 * no real value in trying any harder than this.
1403 */
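/*
 * A minimal sketch of that workaround, assuming one global spinlock is
 * enough (the lock and function names here are hypothetical; the real
 * sun4dsmp_flush_tlb_* routines wired up below are implemented elsewhere):
 */
#if 0	/* illustration only, never compiled */
static DEFINE_SPINLOCK(sun4dsmp_tlb_lock);

static void sketch_sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
					   unsigned long page)
{
	unsigned long flags;

	/* Serialize broadcast TLB flushes so the XBUS fifo cannot overflow. */
	spin_lock_irqsave(&sun4dsmp_tlb_lock, flags);
	viking_flush_tlb_page(vma, page);	/* the hardware broadcasts this */
	spin_unlock_irqrestore(&sun4dsmp_tlb_lock, flags);
}
#endif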
1404static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
1405 .cache_all = viking_flush_cache_all,
1406 .cache_mm = viking_flush_cache_mm,
1407 .cache_page = viking_flush_cache_page,
1408 .cache_range = viking_flush_cache_range,
1409 .tlb_all = sun4dsmp_flush_tlb_all,
1410 .tlb_mm = sun4dsmp_flush_tlb_mm,
1411 .tlb_page = sun4dsmp_flush_tlb_page,
1412 .tlb_range = sun4dsmp_flush_tlb_range,
1413 .page_to_ram = viking_flush_page_to_ram,
1414 .sig_insns = viking_flush_sig_insns,
1415 .page_for_dma = viking_flush_page_for_dma,
1416};
1417#endif
1418
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419static void __init init_viking(void)
1420{
1421 unsigned long mreg = srmmu_get_mmureg();
1422
1423 /* Ahhh, the viking. SRMMU VLSI abortion number two... */
Sam Ravnborg605ae962012-07-26 11:02:13 +00001424 if (mreg & VIKING_MMODE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001425 srmmu_name = "TI Viking";
1426 viking_mxcc_present = 0;
1427 msi_set_sync();
1428
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 /*
1430 * We need this to make sure old viking takes no hits
1431	 * on its cache for dma snoops, to work around the
1432 * "load from non-cacheable memory" interrupt bug.
1433 * This is only necessary because of the new way in
1434 * which we use the IOMMU.
1435 */
David S. Miller5d83d662012-05-13 20:49:31 -07001436 viking_ops.page_for_dma = viking_flush_page;
1437#ifdef CONFIG_SMP
1438 viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
1439#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001440 flush_page_for_dma_global = 0;
1441 } else {
1442 srmmu_name = "TI Viking/MXCC";
1443 viking_mxcc_present = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001444 srmmu_cache_pagetables = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 }
1446
David S. Miller5d83d662012-05-13 20:49:31 -07001447 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1448 &viking_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001449#ifdef CONFIG_SMP
David S. Miller5d83d662012-05-13 20:49:31 -07001450 if (sparc_cpu_model == sun4d)
1451 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1452 &viking_sun4d_smp_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001453#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001454
1455 poke_srmmu = poke_viking;
1456}
1457
1458/* Probe for the srmmu chip version. */
1459static void __init get_srmmu_type(void)
1460{
1461 unsigned long mreg, psr;
1462 unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1463
1464 srmmu_modtype = SRMMU_INVAL_MOD;
1465 hwbug_bitmask = 0;
1466
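	/* The module type and revision are the top two nibbles of the MMU
	 * control register; the cpu implementation and version are the top
	 * two nibbles of the PSR.  Decode all four and dispatch on them
	 * below.
	 */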
1467 mreg = srmmu_get_mmureg(); psr = get_psr();
1468 mod_typ = (mreg & 0xf0000000) >> 28;
1469 mod_rev = (mreg & 0x0f000000) >> 24;
1470 psr_typ = (psr >> 28) & 0xf;
1471 psr_vers = (psr >> 24) & 0xf;
1472
Konrad Eisele75d9e342009-08-17 00:13:33 +00001473 /* First, check for sparc-leon. */
1474 if (sparc_cpu_model == sparc_leon) {
Konrad Eisele75d9e342009-08-17 00:13:33 +00001475 init_leon();
1476 return;
1477 }
1478
1479 /* Second, check for HyperSparc or Cypress. */
Sam Ravnborg605ae962012-07-26 11:02:13 +00001480 if (mod_typ == 1) {
1481 switch (mod_rev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 case 7:
1483 /* UP or MP Hypersparc */
1484 init_hypersparc();
1485 break;
1486 case 0:
1487 case 2:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 case 10:
1489 case 11:
1490 case 12:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 case 13:
1492 case 14:
1493 case 15:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001494 default:
David S. Millerc7020eb2012-05-14 22:02:08 -07001495			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
1496 prom_halt();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00001498 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001499 return;
1500 }
Sam Ravnborg605ae962012-07-26 11:02:13 +00001501
1502	/* Now Fujitsu TurboSparc.  It may be running in Swift emulation
Linus Torvalds1da177e2005-04-16 15:20:36 -07001503	 * mode, so we will check for that later...
1504 */
1505 if (psr_typ == 0 && psr_vers == 5) {
1506 init_turbosparc();
1507 return;
1508 }
1509
1510 /* Next check for Fujitsu Swift. */
Sam Ravnborg605ae962012-07-26 11:02:13 +00001511 if (psr_typ == 0 && psr_vers == 4) {
Andres Salomon8d125562010-10-08 14:18:11 -07001512 phandle cpunode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001513 char node_str[128];
1514
1515 /* Look if it is not a TurboSparc emulating Swift... */
1516 cpunode = prom_getchild(prom_root_node);
Sam Ravnborg605ae962012-07-26 11:02:13 +00001517 while ((cpunode = prom_getsibling(cpunode)) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001518 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
Sam Ravnborg605ae962012-07-26 11:02:13 +00001519 if (!strcmp(node_str, "cpu")) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001520 if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
1521 prom_getintdefault(cpunode, "psr-version", 1) == 5) {
1522 init_turbosparc();
1523 return;
1524 }
1525 break;
1526 }
1527 }
Sam Ravnborg605ae962012-07-26 11:02:13 +00001528
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 init_swift();
1530 return;
1531 }
1532
1533 /* Now the Viking family of srmmu. */
Sam Ravnborg605ae962012-07-26 11:02:13 +00001534 if (psr_typ == 4 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001535 ((psr_vers == 0) ||
1536 ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1537 init_viking();
1538 return;
1539 }
1540
1541 /* Finally the Tsunami. */
Sam Ravnborg605ae962012-07-26 11:02:13 +00001542 if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543 init_tsunami();
1544 return;
1545 }
1546
1547 /* Oh well */
1548 srmmu_is_bad();
1549}
1550
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551#ifdef CONFIG_SMP
1552/* Local cross-calls. */
1553static void smp_flush_page_for_dma(unsigned long page)
1554{
David S. Miller5d83d662012-05-13 20:49:31 -07001555 xc1((smpfunc_t) local_ops->page_for_dma, page);
1556 local_ops->page_for_dma(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001557}
1558
David S. Miller5d83d662012-05-13 20:49:31 -07001559static void smp_flush_cache_all(void)
1560{
1561 xc0((smpfunc_t) local_ops->cache_all);
1562 local_ops->cache_all();
1563}
1564
1565static void smp_flush_tlb_all(void)
1566{
1567 xc0((smpfunc_t) local_ops->tlb_all);
1568 local_ops->tlb_all();
1569}
1570
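/* The mm/vma based flushes below all follow the same pattern: copy the
 * mm's cpumask, drop the local cpu from it, cross-call whatever cpus
 * remain, then run the flush locally through local_ops.
 */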
1571static void smp_flush_cache_mm(struct mm_struct *mm)
1572{
1573 if (mm->context != NO_CONTEXT) {
1574 cpumask_t cpu_mask;
1575 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1576 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1577 if (!cpumask_empty(&cpu_mask))
1578 xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
1579 local_ops->cache_mm(mm);
1580 }
1581}
1582
1583static void smp_flush_tlb_mm(struct mm_struct *mm)
1584{
1585 if (mm->context != NO_CONTEXT) {
1586 cpumask_t cpu_mask;
1587 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1588 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1589 if (!cpumask_empty(&cpu_mask)) {
1590 xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
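			/* We are the only user and this is our active mm, so
			 * after the remote flush above the mm should only be
			 * live on this cpu; shrink its cpumask so later
			 * flushes can skip the cross-call.
			 */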
1591 if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
1592 cpumask_copy(mm_cpumask(mm),
1593 cpumask_of(smp_processor_id()));
1594 }
1595 local_ops->tlb_mm(mm);
1596 }
1597}
1598
1599static void smp_flush_cache_range(struct vm_area_struct *vma,
1600 unsigned long start,
1601 unsigned long end)
1602{
1603 struct mm_struct *mm = vma->vm_mm;
1604
1605 if (mm->context != NO_CONTEXT) {
1606 cpumask_t cpu_mask;
1607 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1608 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1609 if (!cpumask_empty(&cpu_mask))
1610 xc3((smpfunc_t) local_ops->cache_range,
1611 (unsigned long) vma, start, end);
1612 local_ops->cache_range(vma, start, end);
1613 }
1614}
1615
1616static void smp_flush_tlb_range(struct vm_area_struct *vma,
1617 unsigned long start,
1618 unsigned long end)
1619{
1620 struct mm_struct *mm = vma->vm_mm;
1621
1622 if (mm->context != NO_CONTEXT) {
1623 cpumask_t cpu_mask;
1624 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1625 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1626 if (!cpumask_empty(&cpu_mask))
1627 xc3((smpfunc_t) local_ops->tlb_range,
1628 (unsigned long) vma, start, end);
1629 local_ops->tlb_range(vma, start, end);
1630 }
1631}
1632
1633static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1634{
1635 struct mm_struct *mm = vma->vm_mm;
1636
1637 if (mm->context != NO_CONTEXT) {
1638 cpumask_t cpu_mask;
1639 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1640 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1641 if (!cpumask_empty(&cpu_mask))
1642 xc2((smpfunc_t) local_ops->cache_page,
1643 (unsigned long) vma, page);
1644 local_ops->cache_page(vma, page);
1645 }
1646}
1647
1648static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1649{
1650 struct mm_struct *mm = vma->vm_mm;
1651
1652 if (mm->context != NO_CONTEXT) {
1653 cpumask_t cpu_mask;
1654 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1655 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1656 if (!cpumask_empty(&cpu_mask))
1657 xc2((smpfunc_t) local_ops->tlb_page,
1658 (unsigned long) vma, page);
1659 local_ops->tlb_page(vma, page);
1660 }
1661}
1662
1663static void smp_flush_page_to_ram(unsigned long page)
1664{
1665	/* Current theory is that those who call this are the ones
1666	 * who have just dirtied their cache with the page's contents
1667	 * in kernel space, therefore we only run this on the local cpu.
1668 *
1669 * XXX This experiment failed, research further... -DaveM
1670 */
1671#if 1
1672 xc1((smpfunc_t) local_ops->page_to_ram, page);
1673#endif
1674 local_ops->page_to_ram(page);
1675}
1676
1677static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1678{
1679 cpumask_t cpu_mask;
1680 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1681 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1682 if (!cpumask_empty(&cpu_mask))
1683 xc2((smpfunc_t) local_ops->sig_insns,
1684 (unsigned long) mm, insn_addr);
1685 local_ops->sig_insns(mm, insn_addr);
1686}
1687
1688static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
1689 .cache_all = smp_flush_cache_all,
1690 .cache_mm = smp_flush_cache_mm,
1691 .cache_page = smp_flush_cache_page,
1692 .cache_range = smp_flush_cache_range,
1693 .tlb_all = smp_flush_tlb_all,
1694 .tlb_mm = smp_flush_tlb_mm,
1695 .tlb_page = smp_flush_tlb_page,
1696 .tlb_range = smp_flush_tlb_range,
1697 .page_to_ram = smp_flush_page_to_ram,
1698 .sig_insns = smp_flush_sig_insns,
1699 .page_for_dma = smp_flush_page_for_dma,
1700};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001701#endif
1702
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703/* Load up routines and constants for sun4m and sun4d mmu */
Sam Ravnborga3c5c662012-05-12 20:35:52 +02001704void __init load_mmu(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705{
1706 extern void ld_mmu_iommu(void);
1707 extern void ld_mmu_iounit(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001708
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709 /* Functions */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001710 get_srmmu_type();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711
1712#ifdef CONFIG_SMP
1713 /* El switcheroo... */
David S. Miller5d83d662012-05-13 20:49:31 -07001714 local_ops = sparc32_cachetlb_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715
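	/* sun4d broadcasts its TLB flushes in hardware (see the comment
	 * above viking_sun4d_smp_ops), and LEON is treated the same way
	 * here: keep the chip's local TLB ops instead of the cross-calling
	 * versions.
	 */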
David S. Miller5d83d662012-05-13 20:49:31 -07001716 if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
1717 smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
1718 smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
1719 smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
1720 smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001721 }
David S. Miller64273d02008-11-26 01:00:58 -08001722
1723 if (poke_srmmu == poke_viking) {
1724 /* Avoid unnecessary cross calls. */
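		/* Presumably the MBus keeps the Viking caches coherent, so
		 * remote cache/page flushes would be wasted work; reuse the
		 * local routines for them too.
		 */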
David S. Miller5d83d662012-05-13 20:49:31 -07001725 smp_cachetlb_ops.cache_all = local_ops->cache_all;
1726 smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
1727 smp_cachetlb_ops.cache_range = local_ops->cache_range;
1728 smp_cachetlb_ops.cache_page = local_ops->cache_page;
1729
1730 smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
1731 smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
1732 smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
David S. Miller64273d02008-11-26 01:00:58 -08001733 }
David S. Miller5d83d662012-05-13 20:49:31 -07001734
1735 /* It really is const after this point. */
1736 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1737 &smp_cachetlb_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738#endif
1739
1740 if (sparc_cpu_model == sun4d)
1741 ld_mmu_iounit();
1742 else
1743 ld_mmu_iommu();
1744#ifdef CONFIG_SMP
1745 if (sparc_cpu_model == sun4d)
1746 sun4d_init_smp();
Konrad Eisele84017072009-08-31 22:08:13 +00001747 else if (sparc_cpu_model == sparc_leon)
1748 leon_init_smp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001749 else
1750 sun4m_init_smp();
1751#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001752}