/*
 * srmmu.c:  SRMMU specific routines for memory management.
 *
 * Copyright (C) 1995 David S. Miller  (davem@caip.rutgers.edu)
 * Copyright (C) 1995,2002 Pete Zaitcev (zaitcev@yahoo.com)
 * Copyright (C) 1996 Eddie C. Dost    (ecd@skynet.be)
 * Copyright (C) 1997,1998 Jakub Jelinek (jj@sunsite.mff.cuni.cz)
 * Copyright (C) 1999,2000 Anton Blanchard (anton@samba.org)
 */

#include <linux/seq_file.h>
#include <linux/spinlock.h>
#include <linux/bootmem.h>
#include <linux/pagemap.h>
#include <linux/vmalloc.h>
#include <linux/kdebug.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/log2.h>
#include <linux/gfp.h>
#include <linux/fs.h>
#include <linux/mm.h>

#include <asm/mmu_context.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/io-unit.h>
#include <asm/pgalloc.h>
#include <asm/pgtable.h>
#include <asm/bitext.h>
#include <asm/vaddrs.h>
#include <asm/cache.h>
#include <asm/traps.h>
#include <asm/oplib.h>
#include <asm/mbus.h>
#include <asm/page.h>
#include <asm/asi.h>
#include <asm/msi.h>
#include <asm/smp.h>
#include <asm/io.h>

/* Now the cpu specific definitions. */
#include <asm/turbosparc.h>
#include <asm/tsunami.h>
#include <asm/viking.h>
#include <asm/swift.h>
#include <asm/leon.h>
#include <asm/mxcc.h>
#include <asm/ross.h>

#include "srmmu.h"

enum mbus_module srmmu_modtype;
static unsigned int hwbug_bitmask;
int vac_cache_size;
int vac_line_size;

struct ctx_list *ctx_list_pool;
struct ctx_list ctx_free;
struct ctx_list ctx_used;

extern struct resource sparc_iomap;

extern unsigned long last_valid_pfn;

static pgd_t *srmmu_swapper_pg_dir;

const struct sparc32_cachetlb_ops *sparc32_cachetlb_ops;

#ifdef CONFIG_SMP
const struct sparc32_cachetlb_ops *local_ops;

#define FLUSH_BEGIN(mm)
#define FLUSH_END
#else
#define FLUSH_BEGIN(mm) if ((mm)->context != NO_CONTEXT) {
#define FLUSH_END	}
#endif
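
/*
 * On UP builds, FLUSH_BEGIN/FLUSH_END wrap each flush body in an "if"
 * so that mms which never received an MMU context are skipped; the
 * braces deliberately pair across the two macros.  On SMP both expand
 * to nothing and the flush body always runs.
 */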

int flush_page_for_dma_global = 1;

char *srmmu_name;

ctxd_t *srmmu_ctx_table_phys;
static ctxd_t *srmmu_context_table;

int viking_mxcc_present;
static DEFINE_SPINLOCK(srmmu_context_spinlock);

static int is_hypersparc;

static int srmmu_cache_pagetables;

/* these will be initialized in srmmu_nocache_calcsize() */
static unsigned long srmmu_nocache_size;
static unsigned long srmmu_nocache_end;

/* 1 bit <=> 256 bytes of nocache <=> 64 PTEs */
#define SRMMU_NOCACHE_BITMAP_SHIFT (PAGE_SHIFT - 4)
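/*
 * With 4 KB pages this shift is 8, so one bitmap bit covers
 * 2^8 = 256 bytes: exactly one hardware PTE table of 64 four-byte
 * PTEs, as the comment above says.
 */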

/* The context table is a nocache user with the biggest alignment needs. */
#define SRMMU_NOCACHE_ALIGN_MAX (sizeof(ctxd_t)*SRMMU_MAX_CONTEXTS)

void *srmmu_nocache_pool;
void *srmmu_nocache_bitmap;
static struct bit_map srmmu_nocache_map;

static inline int srmmu_pmd_none(pmd_t pmd)
{ return !(pmd_val(pmd) & 0xFFFFFFF); }

/* XXX should we hyper_flush_whole_icache here - Anton */
static inline void srmmu_ctxd_set(ctxd_t *ctxp, pgd_t *pgdp)
{ set_pte((pte_t *)ctxp, (SRMMU_ET_PTD | (__nocache_pa((unsigned long) pgdp) >> 4))); }

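/*
 * A Linux "soft" pmd spans PTRS_PER_PTE ptes, while a hardware SRMMU
 * page table holds only SRMMU_REAL_PTRS_PER_PTE of them.  Each soft
 * pmd therefore fans out into PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE
 * consecutive hardware page-table pointers, which the loops below fill.
 */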
void pmd_set(pmd_t *pmdp, pte_t *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = __nocache_pa((unsigned long) ptep) >> 4;
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

void pmd_populate(struct mm_struct *mm, pmd_t *pmdp, struct page *ptep)
{
	unsigned long ptp;	/* Physical address, shifted right by 4 */
	int i;

	ptp = page_to_pfn(ptep) << (PAGE_SHIFT-4);	/* watch for overflow */
	for (i = 0; i < PTRS_PER_PTE/SRMMU_REAL_PTRS_PER_PTE; i++) {
		set_pte((pte_t *)&pmdp->pmdv[i], SRMMU_ET_PTD | ptp);
		ptp += (SRMMU_REAL_PTRS_PER_PTE*sizeof(pte_t) >> 4);
	}
}

/* Find an entry in the third-level page table. */
pte_t *pte_offset_kernel(pmd_t *dir, unsigned long address)
{
	void *pte;

	pte = __nocache_va((dir->pmdv[0] & SRMMU_PTD_PMASK) << 4);
	return (pte_t *) pte +
	    ((address >> PAGE_SHIFT) & (PTRS_PER_PTE - 1));
}

/*
 * size: bytes to allocate in the nocache area.
 * align: bytes, number to align at.
 * Returns the virtual address of the allocated area.
 */
static void *__srmmu_get_nocache(int size, int align)
{
	int offset;
	unsigned long addr;

	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk(KERN_ERR "Size 0x%x too small for nocache request\n",
		       size);
		size = SRMMU_NOCACHE_BITMAP_SHIFT;
	}
	if (size & (SRMMU_NOCACHE_BITMAP_SHIFT - 1)) {
		printk(KERN_ERR "Size 0x%x unaligned in nocache request\n",
		       size);
		size += SRMMU_NOCACHE_BITMAP_SHIFT - 1;
	}
	BUG_ON(align > SRMMU_NOCACHE_ALIGN_MAX);

	offset = bit_map_string_get(&srmmu_nocache_map,
				    size >> SRMMU_NOCACHE_BITMAP_SHIFT,
				    align >> SRMMU_NOCACHE_BITMAP_SHIFT);
	if (offset == -1) {
		printk(KERN_ERR "srmmu: out of nocache %d: %d/%d\n",
		       size, (int) srmmu_nocache_size,
		       srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
		return NULL;
	}

	addr = SRMMU_NOCACHE_VADDR + (offset << SRMMU_NOCACHE_BITMAP_SHIFT);
	return (void *)addr;
}

void *srmmu_get_nocache(int size, int align)
{
	void *tmp;

	tmp = __srmmu_get_nocache(size, align);

	if (tmp)
		memset(tmp, 0, size);

	return tmp;
}

void srmmu_free_nocache(void *addr, int size)
{
	unsigned long vaddr;
	int offset;

	vaddr = (unsigned long)addr;
	if (vaddr < SRMMU_NOCACHE_VADDR) {
		printk("Vaddr %lx is smaller than nocache base 0x%lx\n",
		    vaddr, (unsigned long)SRMMU_NOCACHE_VADDR);
		BUG();
	}
	if (vaddr + size > srmmu_nocache_end) {
		printk("Vaddr %lx is bigger than nocache end 0x%lx\n",
		    vaddr, srmmu_nocache_end);
		BUG();
	}
	if (!is_power_of_2(size)) {
		printk("Size 0x%x is not a power of 2\n", size);
		BUG();
	}
	if (size < SRMMU_NOCACHE_BITMAP_SHIFT) {
		printk("Size 0x%x is too small\n", size);
		BUG();
	}
	if (vaddr & (size - 1)) {
		printk("Vaddr %lx is not aligned to size 0x%x\n", vaddr, size);
		BUG();
	}

	offset = (vaddr - SRMMU_NOCACHE_VADDR) >> SRMMU_NOCACHE_BITMAP_SHIFT;
	size = size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	bit_map_clear(&srmmu_nocache_map, offset, size);
}
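
/*
 * Illustrative use of the nocache allocator (a sketch only; the
 * page-table code below makes exactly these calls with PTE_SIZE):
 *
 *	pte_t *ptep = srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
 *	...
 *	srmmu_free_nocache(ptep, PTE_SIZE);
 *
 * Sizes must be powers of two no smaller than the bitmap granularity,
 * and the freed address must be size-aligned -- srmmu_free_nocache()
 * BUGs on violations.
 */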

static void srmmu_early_allocate_ptable_skeleton(unsigned long start,
						 unsigned long end);

extern unsigned long probe_memory(void);	/* in fault.c */

/*
 * Reserve nocache dynamically, in proportion to the amount of
 * system RAM. -- Tomas Szepe <szepe@pinerecords.com>, June 2002
 */
static void __init srmmu_nocache_calcsize(void)
{
	unsigned long sysmemavail = probe_memory() / 1024;
	int srmmu_nocache_npages;

	srmmu_nocache_npages =
		sysmemavail / SRMMU_NOCACHE_ALCRATIO / 1024 * 256;
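	/* i.e., 256 nocache pages per SRMMU_NOCACHE_ALCRATIO megabytes
	 * of system RAM (sysmemavail is in kilobytes). */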

	/* P3 XXX The 4x overuse: corroborated by /proc/meminfo. */
	// if (srmmu_nocache_npages < 256) srmmu_nocache_npages = 256;
	if (srmmu_nocache_npages < SRMMU_MIN_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MIN_NOCACHE_PAGES;

	/* anything above 1280 blows up */
	if (srmmu_nocache_npages > SRMMU_MAX_NOCACHE_PAGES)
		srmmu_nocache_npages = SRMMU_MAX_NOCACHE_PAGES;

	srmmu_nocache_size = srmmu_nocache_npages * PAGE_SIZE;
	srmmu_nocache_end = SRMMU_NOCACHE_VADDR + srmmu_nocache_size;
}

static void __init srmmu_nocache_init(void)
{
	unsigned int bitmap_bits;
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long paddr, vaddr;
	unsigned long pteval;

	bitmap_bits = srmmu_nocache_size >> SRMMU_NOCACHE_BITMAP_SHIFT;

	srmmu_nocache_pool = __alloc_bootmem(srmmu_nocache_size,
		SRMMU_NOCACHE_ALIGN_MAX, 0UL);
	memset(srmmu_nocache_pool, 0, srmmu_nocache_size);

	srmmu_nocache_bitmap = __alloc_bootmem(bitmap_bits >> 3, SMP_CACHE_BYTES, 0UL);
	bit_map_init(&srmmu_nocache_map, srmmu_nocache_bitmap, bitmap_bits);

	srmmu_swapper_pg_dir = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	memset(__nocache_fix(srmmu_swapper_pg_dir), 0, SRMMU_PGD_TABLE_SIZE);
	init_mm.pgd = srmmu_swapper_pg_dir;

	srmmu_early_allocate_ptable_skeleton(SRMMU_NOCACHE_VADDR, srmmu_nocache_end);

	paddr = __pa((unsigned long)srmmu_nocache_pool);
	vaddr = SRMMU_NOCACHE_VADDR;

	while (vaddr < srmmu_nocache_end) {
		pgd = pgd_offset_k(vaddr);
		pmd = pmd_offset(__nocache_fix(pgd), vaddr);
		pte = pte_offset_kernel(__nocache_fix(pmd), vaddr);

		pteval = ((paddr >> 4) | SRMMU_ET_PTE | SRMMU_PRIV);

		if (srmmu_cache_pagetables)
			pteval |= SRMMU_CACHE;

		set_pte(__nocache_fix(pte), __pte(pteval));

		vaddr += PAGE_SIZE;
		paddr += PAGE_SIZE;
	}

	flush_cache_all();
	flush_tlb_all();
}

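/*
 * Hand out a pgd from the nocache pool: user entries are zeroed,
 * kernel entries are copied from the reference pgd (init_mm's).
 */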
pgd_t *get_pgd_fast(void)
{
	pgd_t *pgd = NULL;

	pgd = __srmmu_get_nocache(SRMMU_PGD_TABLE_SIZE, SRMMU_PGD_TABLE_SIZE);
	if (pgd) {
		pgd_t *init = pgd_offset_k(0);
		memset(pgd, 0, USER_PTRS_PER_PGD * sizeof(pgd_t));
		memcpy(pgd + USER_PTRS_PER_PGD, init + USER_PTRS_PER_PGD,
		       (PTRS_PER_PGD - USER_PTRS_PER_PGD) * sizeof(pgd_t));
	}

	return pgd;
}

/*
 * Hardware needs alignment to 256 only, but we align to whole page size
 * to reduce fragmentation problems due to the buddy principle.
 * XXX Provide actual fragmentation statistics in /proc.
 *
 * Alignments up to the page size are the same for physical and virtual
 * addresses of the nocache area.
 */
pgtable_t pte_alloc_one(struct mm_struct *mm, unsigned long address)
{
	unsigned long pte;
	struct page *page;

	if ((pte = (unsigned long)pte_alloc_one_kernel(mm, address)) == 0)
		return NULL;
	page = pfn_to_page(__nocache_pa(pte) >> PAGE_SHIFT);
	pgtable_page_ctor(page);
	return page;
}

void pte_free(struct mm_struct *mm, pgtable_t pte)
{
	unsigned long p;

	pgtable_page_dtor(pte);
	p = (unsigned long)page_address(pte);	/* Cached address (for test) */
	if (p == 0)
		BUG();
	p = page_to_pfn(pte) << PAGE_SHIFT;	/* Physical address */

	/* free non-cached virtual address */
	srmmu_free_nocache(__nocache_va(p), PTE_SIZE);
}

/*
 * Grab a free MMU context for "mm".  If none are free, steal one that
 * is in use (avoiding old_mm's), flushing the victim's cache and TLB
 * entries before handing its context number to "mm".  Called with
 * srmmu_context_spinlock held.
 */
static inline void alloc_context(struct mm_struct *old_mm, struct mm_struct *mm)
{
	struct ctx_list *ctxp;

	ctxp = ctx_free.next;
	if (ctxp != &ctx_free) {
		remove_from_ctx_list(ctxp);
		add_to_used_ctxlist(ctxp);
		mm->context = ctxp->ctx_number;
		ctxp->ctx_mm = mm;
		return;
	}
	ctxp = ctx_used.next;
	if (ctxp->ctx_mm == old_mm)
		ctxp = ctxp->next;
	if (ctxp == &ctx_used)
		panic("out of mmu contexts");
	flush_cache_mm(ctxp->ctx_mm);
	flush_tlb_mm(ctxp->ctx_mm);
	remove_from_ctx_list(ctxp);
	add_to_used_ctxlist(ctxp);
	ctxp->ctx_mm->context = NO_CONTEXT;
	ctxp->ctx_mm = mm;
	mm->context = ctxp->ctx_number;
}

static inline void free_context(int context)
{
	struct ctx_list *ctx_old;

	ctx_old = ctx_list_pool + context;
	remove_from_ctx_list(ctx_old);
	add_to_free_ctxlist(ctx_old);
}


void switch_mm(struct mm_struct *old_mm, struct mm_struct *mm,
	       struct task_struct *tsk)
{
	if (mm->context == NO_CONTEXT) {
		spin_lock(&srmmu_context_spinlock);
		alloc_context(old_mm, mm);
		spin_unlock(&srmmu_context_spinlock);
		srmmu_ctxd_set(&srmmu_context_table[mm->context], mm->pgd);
	}

	if (sparc_cpu_model == sparc_leon)
		leon_switch_mm();

	if (is_hypersparc)
		hyper_flush_whole_icache();

	srmmu_set_context(mm->context);
}

/* Low level IO area allocation on the SRMMU. */
static inline void srmmu_mapioaddr(unsigned long physaddr,
				   unsigned long virt_addr, int bus_type)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	unsigned long tmp;

	physaddr &= PAGE_MASK;
	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);
	tmp = (physaddr >> 4) | SRMMU_ET_PTE;

	/* I need to test whether this is consistent over all
	 * sun4m's.  The bus_type represents the upper 4 bits of
	 * 36-bit physical address on the I/O space lines...
	 */
	tmp |= (bus_type << 28);
	tmp |= SRMMU_PRIV;
	__flush_page_to_ram(virt_addr);
	set_pte(ptep, __pte(tmp));
}

void srmmu_mapiorange(unsigned int bus, unsigned long xpa,
		      unsigned long xva, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_mapioaddr(xpa, xva, bus);
		xva += PAGE_SIZE;
		xpa += PAGE_SIZE;
	}
	flush_tlb_all();
}

static inline void srmmu_unmapioaddr(unsigned long virt_addr)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	pgdp = pgd_offset_k(virt_addr);
	pmdp = pmd_offset(pgdp, virt_addr);
	ptep = pte_offset_kernel(pmdp, virt_addr);

	/* No need to flush uncacheable page. */
	__pte_clear(ptep);
}

void srmmu_unmapiorange(unsigned long virt_addr, unsigned int len)
{
	while (len != 0) {
		len -= PAGE_SIZE;
		srmmu_unmapioaddr(virt_addr);
		virt_addr += PAGE_SIZE;
	}
	flush_tlb_all();
}

/* tsunami.S */
extern void tsunami_flush_cache_all(void);
extern void tsunami_flush_cache_mm(struct mm_struct *mm);
extern void tsunami_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_flush_page_to_ram(unsigned long page);
extern void tsunami_flush_page_for_dma(unsigned long page);
extern void tsunami_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void tsunami_flush_tlb_all(void);
extern void tsunami_flush_tlb_mm(struct mm_struct *mm);
extern void tsunami_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void tsunami_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void tsunami_setup_blockops(void);

/* swift.S */
extern void swift_flush_cache_all(void);
extern void swift_flush_cache_mm(struct mm_struct *mm);
extern void swift_flush_cache_range(struct vm_area_struct *vma,
				    unsigned long start, unsigned long end);
extern void swift_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void swift_flush_page_to_ram(unsigned long page);
extern void swift_flush_page_for_dma(unsigned long page);
extern void swift_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void swift_flush_tlb_all(void);
extern void swift_flush_tlb_mm(struct mm_struct *mm);
extern void swift_flush_tlb_range(struct vm_area_struct *vma,
				  unsigned long start, unsigned long end);
extern void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);

#if 0 /* P3: deadwood to debug precise flushes on Swift. */
void swift_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cctx, ctx1;

	page &= PAGE_MASK;
	if ((ctx1 = vma->vm_mm->context) != -1) {
		cctx = srmmu_get_context();
/* Is context # ever different from current context? P3 */
		if (cctx != ctx1) {
			printk("flush ctx %02x curr %02x\n", ctx1, cctx);
			srmmu_set_context(ctx1);
			swift_flush_page(page);
			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
					"r" (page), "i" (ASI_M_FLUSH_PROBE));
			srmmu_set_context(cctx);
		} else {
			/* Rm. prot. bits from virt. c. */
			/* swift_flush_cache_all(); */
			/* swift_flush_cache_page(vma, page); */
			swift_flush_page(page);

			__asm__ __volatile__("sta %%g0, [%0] %1\n\t" : :
				"r" (page), "i" (ASI_M_FLUSH_PROBE));
			/* same as above: srmmu_flush_tlb_page() */
		}
	}
}
#endif

/*
 * The following are all MBUS based SRMMU modules, and therefore could
 * be found in a multiprocessor configuration.  On the whole, these
 * chips seem to be much more touchy about DVMA and page tables
 * with respect to cache coherency.
 */

/* viking.S */
extern void viking_flush_cache_all(void);
extern void viking_flush_cache_mm(struct mm_struct *mm);
extern void viking_flush_cache_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void viking_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void viking_flush_page_to_ram(unsigned long page);
extern void viking_flush_page_for_dma(unsigned long page);
extern void viking_flush_sig_insns(struct mm_struct *mm, unsigned long addr);
extern void viking_flush_page(unsigned long page);
extern void viking_mxcc_flush_page(unsigned long page);
extern void viking_flush_tlb_all(void);
extern void viking_flush_tlb_mm(struct mm_struct *mm);
extern void viking_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				   unsigned long end);
extern void viking_flush_tlb_page(struct vm_area_struct *vma,
				  unsigned long page);
extern void sun4dsmp_flush_tlb_all(void);
extern void sun4dsmp_flush_tlb_mm(struct mm_struct *mm);
extern void sun4dsmp_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
				     unsigned long end);
extern void sun4dsmp_flush_tlb_page(struct vm_area_struct *vma,
				    unsigned long page);

/* hypersparc.S */
extern void hypersparc_flush_cache_all(void);
extern void hypersparc_flush_cache_mm(struct mm_struct *mm);
extern void hypersparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_flush_page_to_ram(unsigned long page);
extern void hypersparc_flush_page_for_dma(unsigned long page);
extern void hypersparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr);
extern void hypersparc_flush_tlb_all(void);
extern void hypersparc_flush_tlb_mm(struct mm_struct *mm);
extern void hypersparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end);
extern void hypersparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page);
extern void hypersparc_setup_blockops(void);

/*
 * NOTE: All of this startup code assumes the low 16mb (approx.) of
 *       kernel mappings are done with one single contiguous chunk of
 *       ram.  On small ram machines (classics mainly) we only get
 *       around 8mb mapped for us.
 */

static void __init early_pgtable_allocfail(char *type)
{
	prom_printf("inherit_prom_mappings: Cannot alloc kernel %s.\n", type);
	prom_halt();
}

static void __init srmmu_early_allocate_ptable_skeleton(unsigned long start,
							unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = __srmmu_get_nocache(
			    SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

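/*
 * Same as the "early" variant above, but runs once the nocache area
 * itself is mapped, so the tables can be written directly without
 * __nocache_fix() translation.
 */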
static void __init srmmu_allocate_ptable_skeleton(unsigned long start,
						  unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;

	while (start < end) {
		pgdp = pgd_offset_k(start);
		if (pgd_none(*pgdp)) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(pmdp, 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(pgdp, pmdp);
		}
		pmdp = pmd_offset(pgdp, start);
		if (srmmu_pmd_none(*pmdp)) {
			ptep = __srmmu_get_nocache(PTE_SIZE,
						   PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(ptep, 0, PTE_SIZE);
			pmd_set(pmdp, ptep);
		}
		if (start > (0xffffffffUL - PMD_SIZE))
			break;
		start = (start + PMD_SIZE) & PMD_MASK;
	}
}

/* These flush types are not available on all chips... */
static inline unsigned long srmmu_probe(unsigned long vaddr)
{
	unsigned long retval;

	if (sparc_cpu_model != sparc_leon) {

		vaddr &= PAGE_MASK;
		__asm__ __volatile__("lda [%1] %2, %0\n\t" :
				     "=r" (retval) :
				     "r" (vaddr | 0x400), "i" (ASI_M_FLUSH_PROBE));
	} else {
		retval = leon_swprobe(vaddr, 0);
	}
	return retval;
}
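
/*
 * The probe returns the PTE the MMU's table walk finds for vaddr, or 0
 * if there is no mapping.  The 0x400 or'ed into the address should
 * select the "probe entire" operation in the SRMMU flush/probe
 * address space.
 */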

/*
 * This is much cleaner than poking around physical address space
 * looking at the prom's page table directly which is what most
 * other OS's do.  Yuck... this is much better.
 */
static void __init srmmu_inherit_prom_mappings(unsigned long start,
					       unsigned long end)
{
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int what = 0; /* 0 = normal-pte, 1 = pmd-level pte, 2 = pgd-level pte */
	unsigned long prompte;

	while (start <= end) {
		if (start == 0)
			break; /* probably wrap around */
		if (start == 0xfef00000)
			start = KADB_DEBUGGER_BEGVM;
		if (!(prompte = srmmu_probe(start))) {
			start += PAGE_SIZE;
			continue;
		}

		/* A red snapper, see what it really is. */
		what = 0;

		if (!(start & ~(SRMMU_REAL_PMD_MASK))) {
			if (srmmu_probe((start - PAGE_SIZE) + SRMMU_REAL_PMD_SIZE) == prompte)
				what = 1;
		}

		if (!(start & ~(SRMMU_PGDIR_MASK))) {
			if (srmmu_probe((start-PAGE_SIZE) + SRMMU_PGDIR_SIZE) ==
			    prompte)
				what = 2;
		}

		pgdp = pgd_offset_k(start);
		if (what == 2) {
			*(pgd_t *)__nocache_fix(pgdp) = __pgd(prompte);
			start += SRMMU_PGDIR_SIZE;
			continue;
		}
		if (pgd_none(*(pgd_t *)__nocache_fix(pgdp))) {
			pmdp = __srmmu_get_nocache(SRMMU_PMD_TABLE_SIZE, SRMMU_PMD_TABLE_SIZE);
			if (pmdp == NULL)
				early_pgtable_allocfail("pmd");
			memset(__nocache_fix(pmdp), 0, SRMMU_PMD_TABLE_SIZE);
			pgd_set(__nocache_fix(pgdp), pmdp);
		}
		pmdp = pmd_offset(__nocache_fix(pgdp), start);
		if (srmmu_pmd_none(*(pmd_t *)__nocache_fix(pmdp))) {
			ptep = __srmmu_get_nocache(PTE_SIZE, PTE_SIZE);
			if (ptep == NULL)
				early_pgtable_allocfail("pte");
			memset(__nocache_fix(ptep), 0, PTE_SIZE);
			pmd_set(__nocache_fix(pmdp), ptep);
		}
		if (what == 1) {
			/* We bend the rule where all 16 PTPs in a pmd_t point
			 * inside the same PTE page, and we leak a perfectly
			 * good hardware PTE piece. Alternatives seem worse.
			 */
			unsigned int x;	/* Index of HW PMD in soft cluster */
			x = (start >> PMD_SHIFT) & 15;
			*(unsigned long *)__nocache_fix(&pmdp->pmdv[x]) = prompte;
			start += SRMMU_REAL_PMD_SIZE;
			continue;
		}
		ptep = pte_offset_kernel(__nocache_fix(pmdp), start);
		*(pte_t *)__nocache_fix(ptep) = __pte(prompte);
		start += PAGE_SIZE;
	}
}

#define KERNEL_PTE(page_shifted) ((page_shifted)|SRMMU_CACHE|SRMMU_PRIV|SRMMU_VALID)

/* Create a third-level SRMMU 16MB page mapping. */
static void __init do_large_mapping(unsigned long vaddr, unsigned long phys_base)
{
	pgd_t *pgdp = pgd_offset_k(vaddr);
	unsigned long big_pte;

	big_pte = KERNEL_PTE(phys_base >> 4);
	*(pgd_t *)__nocache_fix(pgdp) = __pgd(big_pte);
}

/* Map sp_bank entry SP_ENTRY, starting at virtual address VBASE. */
static unsigned long __init map_spbank(unsigned long vbase, int sp_entry)
{
	unsigned long pstart = (sp_banks[sp_entry].base_addr & SRMMU_PGDIR_MASK);
	unsigned long vstart = (vbase & SRMMU_PGDIR_MASK);
	unsigned long vend = SRMMU_PGDIR_ALIGN(vbase + sp_banks[sp_entry].num_bytes);
	/* Map "low" memory only */
	const unsigned long min_vaddr = PAGE_OFFSET;
	const unsigned long max_vaddr = PAGE_OFFSET + SRMMU_MAXMEM;

	if (vstart < min_vaddr || vstart >= max_vaddr)
		return vstart;

	if (vend > max_vaddr || vend < min_vaddr)
		vend = max_vaddr;

	while (vstart < vend) {
		do_large_mapping(vstart, pstart);
		vstart += SRMMU_PGDIR_SIZE; pstart += SRMMU_PGDIR_SIZE;
	}
	return vstart;
}

static void __init map_kernel(void)
{
	int i;

	if (phys_base > 0) {
		do_large_mapping(PAGE_OFFSET, phys_base);
	}

	for (i = 0; sp_banks[i].num_bytes != 0; i++) {
		map_spbank((unsigned long)__va(sp_banks[i].base_addr), i);
	}
}

/* Paging initialization on the Sparc Reference MMU. */
extern void sparc_context_init(int);

void (*poke_srmmu)(void) __cpuinitdata = NULL;

extern unsigned long bootmem_init(unsigned long *pages_avail);

void __init srmmu_paging_init(void)
{
	int i;
	phandle cpunode;
	char node_str[128];
	pgd_t *pgd;
	pmd_t *pmd;
	pte_t *pte;
	unsigned long pages_avail;

	sparc_iomap.start = SUN4M_IOBASE_VADDR;	/* 16MB of IOSPACE on all sun4m's. */

	if (sparc_cpu_model == sun4d)
		num_contexts = 65536; /* We know it is Viking */
	else {
		/* Find the number of contexts on the srmmu. */
		cpunode = prom_getchild(prom_root_node);
		num_contexts = 0;
		while (cpunode != 0) {
			prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
			if (!strcmp(node_str, "cpu")) {
				num_contexts = prom_getintdefault(cpunode, "mmu-nctx", 0x8);
				break;
			}
			cpunode = prom_getsibling(cpunode);
		}
	}

	if (!num_contexts) {
		prom_printf("Something wrong, can't find cpu node in paging_init.\n");
		prom_halt();
	}

	pages_avail = 0;
	last_valid_pfn = bootmem_init(&pages_avail);

	srmmu_nocache_calcsize();
	srmmu_nocache_init();
	srmmu_inherit_prom_mappings(0xfe400000, (LINUX_OPPROM_ENDVM - PAGE_SIZE));
	map_kernel();

	/* ctx table has to be physically aligned to its size */
	srmmu_context_table = __srmmu_get_nocache(num_contexts * sizeof(ctxd_t), num_contexts * sizeof(ctxd_t));
	srmmu_ctx_table_phys = (ctxd_t *)__nocache_pa((unsigned long)srmmu_context_table);

	for (i = 0; i < num_contexts; i++)
		srmmu_ctxd_set((ctxd_t *)__nocache_fix(&srmmu_context_table[i]), srmmu_swapper_pg_dir);

	flush_cache_all();
	srmmu_set_ctable_ptr((unsigned long)srmmu_ctx_table_phys);
#ifdef CONFIG_SMP
	/* Stop from hanging here... */
	local_ops->tlb_all();
#else
	flush_tlb_all();
#endif
	poke_srmmu();

	srmmu_allocate_ptable_skeleton(sparc_iomap.start, IOBASE_END);
	srmmu_allocate_ptable_skeleton(DVMA_VADDR, DVMA_END);

	srmmu_allocate_ptable_skeleton(
		__fix_to_virt(__end_of_fixed_addresses - 1), FIXADDR_TOP);
	srmmu_allocate_ptable_skeleton(PKMAP_BASE, PKMAP_END);

	pgd = pgd_offset_k(PKMAP_BASE);
	pmd = pmd_offset(pgd, PKMAP_BASE);
	pte = pte_offset_kernel(pmd, PKMAP_BASE);
	pkmap_page_table = pte;

	flush_cache_all();
	flush_tlb_all();

	sparc_context_init(num_contexts);

	kmap_init();

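	/* Low memory becomes ZONE_DMA; pages above max_low_pfn are highmem. */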
880 {
881 unsigned long zones_size[MAX_NR_ZONES];
882 unsigned long zholes_size[MAX_NR_ZONES];
883 unsigned long npages;
884 int znum;
885
886 for (znum = 0; znum < MAX_NR_ZONES; znum++)
887 zones_size[znum] = zholes_size[znum] = 0;
888
889 npages = max_low_pfn - pfn_base;
890
891 zones_size[ZONE_DMA] = npages;
892 zholes_size[ZONE_DMA] = npages - pages_avail;
893
894 npages = highend_pfn - max_low_pfn;
895 zones_size[ZONE_HIGHMEM] = npages;
896 zholes_size[ZONE_HIGHMEM] = npages - calc_highpages();
897
Johannes Weiner9109fb72008-07-23 21:27:20 -0700898 free_area_init_node(0, zones_size, pfn_base, zholes_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700899 }
900}
901
Sam Ravnborg9701b262012-05-13 10:21:25 +0200902void mmu_info(struct seq_file *m)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700903{
Sam Ravnborg605ae962012-07-26 11:02:13 +0000904 seq_printf(m,
Linus Torvalds1da177e2005-04-16 15:20:36 -0700905 "MMU type\t: %s\n"
906 "contexts\t: %d\n"
907 "nocache total\t: %ld\n"
908 "nocache used\t: %d\n",
909 srmmu_name,
910 num_contexts,
911 srmmu_nocache_size,
912 srmmu_nocache_map.used << SRMMU_NOCACHE_BITMAP_SHIFT);
913}
914
Sam Ravnborgb796c6d2012-05-13 10:30:54 +0200915void destroy_context(struct mm_struct *mm)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700916{
917
Sam Ravnborg605ae962012-07-26 11:02:13 +0000918 if (mm->context != NO_CONTEXT) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700919 flush_cache_mm(mm);
920 srmmu_ctxd_set(&srmmu_context_table[mm->context], srmmu_swapper_pg_dir);
921 flush_tlb_mm(mm);
922 spin_lock(&srmmu_context_spinlock);
923 free_context(mm->context);
924 spin_unlock(&srmmu_context_spinlock);
925 mm->context = NO_CONTEXT;
926 }
927}
928
929/* Init various srmmu chip types. */
930static void __init srmmu_is_bad(void)
931{
932 prom_printf("Could not determine SRMMU chip type.\n");
933 prom_halt();
934}
935
936static void __init init_vac_layout(void)
937{
Andres Salomon8d125562010-10-08 14:18:11 -0700938 phandle nd;
939 int cache_lines;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700940 char node_str[128];
941#ifdef CONFIG_SMP
942 int cpu = 0;
943 unsigned long max_size = 0;
944 unsigned long min_line_size = 0x10000000;
945#endif
946
947 nd = prom_getchild(prom_root_node);
Sam Ravnborg605ae962012-07-26 11:02:13 +0000948 while ((nd = prom_getsibling(nd)) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700949 prom_getstring(nd, "device_type", node_str, sizeof(node_str));
Sam Ravnborg605ae962012-07-26 11:02:13 +0000950 if (!strcmp(node_str, "cpu")) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700951 vac_line_size = prom_getint(nd, "cache-line-size");
952 if (vac_line_size == -1) {
Sam Ravnborg605ae962012-07-26 11:02:13 +0000953 prom_printf("can't determine cache-line-size, halting.\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700954 prom_halt();
955 }
956 cache_lines = prom_getint(nd, "cache-nlines");
957 if (cache_lines == -1) {
958 prom_printf("can't determine cache-nlines, halting.\n");
959 prom_halt();
960 }
961
962 vac_cache_size = cache_lines * vac_line_size;
963#ifdef CONFIG_SMP
Sam Ravnborg605ae962012-07-26 11:02:13 +0000964 if (vac_cache_size > max_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700965 max_size = vac_cache_size;
Sam Ravnborg605ae962012-07-26 11:02:13 +0000966 if (vac_line_size < min_line_size)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700967 min_line_size = vac_line_size;
Bob Breuera54123e2006-03-23 22:36:19 -0800968 //FIXME: cpus not contiguous!!
Linus Torvalds1da177e2005-04-16 15:20:36 -0700969 cpu++;
Rusty Russellec7c14b2009-03-16 14:40:24 +1030970 if (cpu >= nr_cpu_ids || !cpu_online(cpu))
Linus Torvalds1da177e2005-04-16 15:20:36 -0700971 break;
972#else
973 break;
974#endif
975 }
976 }
Sam Ravnborg605ae962012-07-26 11:02:13 +0000977 if (nd == 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -0700978 prom_printf("No CPU nodes found, halting.\n");
979 prom_halt();
980 }
981#ifdef CONFIG_SMP
982 vac_cache_size = max_size;
983 vac_line_size = min_line_size;
984#endif
985 printk("SRMMU: Using VAC size of %d bytes, line size %d bytes.\n",
986 (int)vac_cache_size, (int)vac_line_size);
987}
988
Al Viro409832f2008-11-22 17:33:54 +0000989static void __cpuinit poke_hypersparc(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -0700990{
991 volatile unsigned long clear;
992 unsigned long mreg = srmmu_get_mmureg();
993
994 hyper_flush_unconditional_combined();
995
996 mreg &= ~(HYPERSPARC_CWENABLE);
997 mreg |= (HYPERSPARC_CENABLE | HYPERSPARC_WBENABLE);
998 mreg |= (HYPERSPARC_CMODE);
999
1000 srmmu_set_mmureg(mreg);
1001
1002#if 0 /* XXX I think this is bad news... -DaveM */
1003 hyper_clear_all_tags();
1004#endif
1005
1006 put_ross_icr(HYPERSPARC_ICCR_FTD | HYPERSPARC_ICCR_ICE);
1007 hyper_flush_whole_icache();
1008 clear = srmmu_get_faddr();
1009 clear = srmmu_get_fstatus();
1010}
1011
David S. Miller5d83d662012-05-13 20:49:31 -07001012static const struct sparc32_cachetlb_ops hypersparc_ops = {
1013 .cache_all = hypersparc_flush_cache_all,
1014 .cache_mm = hypersparc_flush_cache_mm,
1015 .cache_page = hypersparc_flush_cache_page,
1016 .cache_range = hypersparc_flush_cache_range,
1017 .tlb_all = hypersparc_flush_tlb_all,
1018 .tlb_mm = hypersparc_flush_tlb_mm,
1019 .tlb_page = hypersparc_flush_tlb_page,
1020 .tlb_range = hypersparc_flush_tlb_range,
1021 .page_to_ram = hypersparc_flush_page_to_ram,
1022 .sig_insns = hypersparc_flush_sig_insns,
1023 .page_for_dma = hypersparc_flush_page_for_dma,
1024};
1025
Linus Torvalds1da177e2005-04-16 15:20:36 -07001026static void __init init_hypersparc(void)
1027{
1028 srmmu_name = "ROSS HyperSparc";
1029 srmmu_modtype = HyperSparc;
1030
1031 init_vac_layout();
1032
1033 is_hypersparc = 1;
David S. Miller5d83d662012-05-13 20:49:31 -07001034 sparc32_cachetlb_ops = &hypersparc_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001035
1036 poke_srmmu = poke_hypersparc;
1037
1038 hypersparc_setup_blockops();
1039}
1040
Al Viro409832f2008-11-22 17:33:54 +00001041static void __cpuinit poke_swift(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001042{
1043 unsigned long mreg;
1044
1045 /* Clear any crap from the cache or else... */
1046 swift_flush_cache_all();
1047
1048 /* Enable I & D caches */
1049 mreg = srmmu_get_mmureg();
1050 mreg |= (SWIFT_IE | SWIFT_DE);
1051 /*
1052 * The Swift branch folding logic is completely broken. At
1053 * trap time, if things are just right, if can mistakenly
1054 * think that a trap is coming from kernel mode when in fact
1055 * it is coming from user mode (it mis-executes the branch in
1056 * the trap code). So you see things like crashme completely
1057 * hosing your machine which is completely unacceptable. Turn
1058 * this shit off... nice job Fujitsu.
1059 */
1060 mreg &= ~(SWIFT_BF);
1061 srmmu_set_mmureg(mreg);
1062}
1063
David S. Miller5d83d662012-05-13 20:49:31 -07001064static const struct sparc32_cachetlb_ops swift_ops = {
1065 .cache_all = swift_flush_cache_all,
1066 .cache_mm = swift_flush_cache_mm,
1067 .cache_page = swift_flush_cache_page,
1068 .cache_range = swift_flush_cache_range,
1069 .tlb_all = swift_flush_tlb_all,
1070 .tlb_mm = swift_flush_tlb_mm,
1071 .tlb_page = swift_flush_tlb_page,
1072 .tlb_range = swift_flush_tlb_range,
1073 .page_to_ram = swift_flush_page_to_ram,
1074 .sig_insns = swift_flush_sig_insns,
1075 .page_for_dma = swift_flush_page_for_dma,
1076};
1077
Linus Torvalds1da177e2005-04-16 15:20:36 -07001078#define SWIFT_MASKID_ADDR 0x10003018
1079static void __init init_swift(void)
1080{
1081 unsigned long swift_rev;
1082
1083 __asm__ __volatile__("lda [%1] %2, %0\n\t"
1084 "srl %0, 0x18, %0\n\t" :
1085 "=r" (swift_rev) :
1086 "r" (SWIFT_MASKID_ADDR), "i" (ASI_M_BYPASS));
1087 srmmu_name = "Fujitsu Swift";
Sam Ravnborg605ae962012-07-26 11:02:13 +00001088 switch (swift_rev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001089 case 0x11:
1090 case 0x20:
1091 case 0x23:
1092 case 0x30:
1093 srmmu_modtype = Swift_lots_o_bugs;
1094 hwbug_bitmask |= (HWBUG_KERN_ACCBROKEN | HWBUG_KERN_CBITBROKEN);
1095 /*
1096 * Gee george, I wonder why Sun is so hush hush about
1097 * this hardware bug... really braindamage stuff going
1098 * on here. However I think we can find a way to avoid
1099 * all of the workaround overhead under Linux. Basically,
1100 * any page fault can cause kernel pages to become user
1101 * accessible (the mmu gets confused and clears some of
1102 * the ACC bits in kernel ptes). Aha, sounds pretty
1103 * horrible eh? But wait, after extensive testing it appears
1104 * that if you use pgd_t level large kernel pte's (like the
1105 * 4MB pages on the Pentium) the bug does not get tripped
1106 * at all. This avoids almost all of the major overhead.
1107 * Welcome to a world where your vendor tells you to,
1108 * "apply this kernel patch" instead of "sorry for the
1109 * broken hardware, send it back and we'll give you
1110 * properly functioning parts"
1111 */
1112 break;
1113 case 0x25:
1114 case 0x31:
1115 srmmu_modtype = Swift_bad_c;
1116 hwbug_bitmask |= HWBUG_KERN_CBITBROKEN;
1117 /*
1118 * You see Sun allude to this hardware bug but never
1119 * admit things directly, they'll say things like,
1120 * "the Swift chip cache problems" or similar.
1121 */
1122 break;
1123 default:
1124 srmmu_modtype = Swift_ok;
1125 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00001126 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001127
David S. Miller5d83d662012-05-13 20:49:31 -07001128 sparc32_cachetlb_ops = &swift_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001129 flush_page_for_dma_global = 0;
1130
1131 /*
1132 * Are you now convinced that the Swift is one of the
1133 * biggest VLSI abortions of all time? Bravo Fujitsu!
1134 * Fujitsu, the !#?!%$'d up processor people. I bet if
1135 * you examined the microcode of the Swift you'd find
1136 * XXX's all over the place.
1137 */
1138 poke_srmmu = poke_swift;
1139}
1140
1141static void turbosparc_flush_cache_all(void)
1142{
1143 flush_user_windows();
1144 turbosparc_idflash_clear();
1145}
1146
1147static void turbosparc_flush_cache_mm(struct mm_struct *mm)
1148{
1149 FLUSH_BEGIN(mm)
1150 flush_user_windows();
1151 turbosparc_idflash_clear();
1152 FLUSH_END
1153}
1154
1155static void turbosparc_flush_cache_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1156{
1157 FLUSH_BEGIN(vma->vm_mm)
1158 flush_user_windows();
1159 turbosparc_idflash_clear();
1160 FLUSH_END
1161}
1162
1163static void turbosparc_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1164{
1165 FLUSH_BEGIN(vma->vm_mm)
1166 flush_user_windows();
1167 if (vma->vm_flags & VM_EXEC)
1168 turbosparc_flush_icache();
1169 turbosparc_flush_dcache();
1170 FLUSH_END
1171}
1172
1173/* TurboSparc is copy-back, if we turn it on, but this does not work. */
1174static void turbosparc_flush_page_to_ram(unsigned long page)
1175{
1176#ifdef TURBOSPARC_WRITEBACK
1177 volatile unsigned long clear;
1178
Sam Ravnborg805918f2012-05-25 21:20:19 +00001179 if (srmmu_probe(page))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001180 turbosparc_flush_page_cache(page);
1181 clear = srmmu_get_fstatus();
1182#endif
1183}
1184
1185static void turbosparc_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1186{
1187}
1188
1189static void turbosparc_flush_page_for_dma(unsigned long page)
1190{
1191 turbosparc_flush_dcache();
1192}
1193
1194static void turbosparc_flush_tlb_all(void)
1195{
1196 srmmu_flush_whole_tlb();
1197}
1198
1199static void turbosparc_flush_tlb_mm(struct mm_struct *mm)
1200{
1201 FLUSH_BEGIN(mm)
1202 srmmu_flush_whole_tlb();
1203 FLUSH_END
1204}
1205
1206static void turbosparc_flush_tlb_range(struct vm_area_struct *vma, unsigned long start, unsigned long end)
1207{
1208 FLUSH_BEGIN(vma->vm_mm)
1209 srmmu_flush_whole_tlb();
1210 FLUSH_END
1211}
1212
1213static void turbosparc_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1214{
1215 FLUSH_BEGIN(vma->vm_mm)
1216 srmmu_flush_whole_tlb();
1217 FLUSH_END
1218}
1219
1220
Al Viro409832f2008-11-22 17:33:54 +00001221static void __cpuinit poke_turbosparc(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001222{
1223 unsigned long mreg = srmmu_get_mmureg();
1224 unsigned long ccreg;
1225
1226 /* Clear any crap from the cache or else... */
1227 turbosparc_flush_cache_all();
Sam Ravnborg605ae962012-07-26 11:02:13 +00001228 /* Temporarily disable I & D caches */
1229 mreg &= ~(TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001230 mreg &= ~(TURBOSPARC_PCENABLE); /* Don't check parity */
1231 srmmu_set_mmureg(mreg);
Sam Ravnborg605ae962012-07-26 11:02:13 +00001232
Linus Torvalds1da177e2005-04-16 15:20:36 -07001233 ccreg = turbosparc_get_ccreg();
1234
1235#ifdef TURBOSPARC_WRITEBACK
1236 ccreg |= (TURBOSPARC_SNENABLE); /* Do DVMA snooping in Dcache */
1237 ccreg &= ~(TURBOSPARC_uS2 | TURBOSPARC_WTENABLE);
1238 /* Write-back D-cache, emulate VLSI
1239 * abortion number three, not number one */
1240#else
1241 /* For now let's play safe, optimize later */
1242 ccreg |= (TURBOSPARC_SNENABLE | TURBOSPARC_WTENABLE);
1243 /* Do DVMA snooping in Dcache, Write-thru D-cache */
1244 ccreg &= ~(TURBOSPARC_uS2);
1245 /* Emulate VLSI abortion number three, not number one */
1246#endif
1247
1248 switch (ccreg & 7) {
1249 case 0: /* No SE cache */
1250 case 7: /* Test mode */
1251 break;
1252 default:
1253 ccreg |= (TURBOSPARC_SCENABLE);
1254 }
Sam Ravnborg605ae962012-07-26 11:02:13 +00001255 turbosparc_set_ccreg(ccreg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001256
1257 mreg |= (TURBOSPARC_ICENABLE | TURBOSPARC_DCENABLE); /* I & D caches on */
1258 mreg |= (TURBOSPARC_ICSNOOP); /* Icache snooping on */
1259 srmmu_set_mmureg(mreg);
1260}
1261
David S. Miller5d83d662012-05-13 20:49:31 -07001262static const struct sparc32_cachetlb_ops turbosparc_ops = {
1263 .cache_all = turbosparc_flush_cache_all,
1264 .cache_mm = turbosparc_flush_cache_mm,
1265 .cache_page = turbosparc_flush_cache_page,
1266 .cache_range = turbosparc_flush_cache_range,
1267 .tlb_all = turbosparc_flush_tlb_all,
1268 .tlb_mm = turbosparc_flush_tlb_mm,
1269 .tlb_page = turbosparc_flush_tlb_page,
1270 .tlb_range = turbosparc_flush_tlb_range,
1271 .page_to_ram = turbosparc_flush_page_to_ram,
1272 .sig_insns = turbosparc_flush_sig_insns,
1273 .page_for_dma = turbosparc_flush_page_for_dma,
1274};
1275
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276static void __init init_turbosparc(void)
1277{
1278 srmmu_name = "Fujitsu TurboSparc";
1279 srmmu_modtype = TurboSparc;
David S. Miller5d83d662012-05-13 20:49:31 -07001280 sparc32_cachetlb_ops = &turbosparc_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001281 poke_srmmu = poke_turbosparc;
1282}
1283
Al Viro409832f2008-11-22 17:33:54 +00001284static void __cpuinit poke_tsunami(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285{
1286 unsigned long mreg = srmmu_get_mmureg();
1287
1288 tsunami_flush_icache();
1289 tsunami_flush_dcache();
1290 mreg &= ~TSUNAMI_ITD;
1291 mreg |= (TSUNAMI_IENAB | TSUNAMI_DENAB);
1292 srmmu_set_mmureg(mreg);
1293}
1294
David S. Miller5d83d662012-05-13 20:49:31 -07001295static const struct sparc32_cachetlb_ops tsunami_ops = {
1296 .cache_all = tsunami_flush_cache_all,
1297 .cache_mm = tsunami_flush_cache_mm,
1298 .cache_page = tsunami_flush_cache_page,
1299 .cache_range = tsunami_flush_cache_range,
1300 .tlb_all = tsunami_flush_tlb_all,
1301 .tlb_mm = tsunami_flush_tlb_mm,
1302 .tlb_page = tsunami_flush_tlb_page,
1303 .tlb_range = tsunami_flush_tlb_range,
1304 .page_to_ram = tsunami_flush_page_to_ram,
1305 .sig_insns = tsunami_flush_sig_insns,
1306 .page_for_dma = tsunami_flush_page_for_dma,
1307};
1308
Linus Torvalds1da177e2005-04-16 15:20:36 -07001309static void __init init_tsunami(void)
1310{
1311 /*
1312 * Tsunami's pretty sane, Sun and TI actually got it
1313 * somewhat right this time. Fujitsu should have
1314 * taken some lessons from them.
1315 */
1316
1317 srmmu_name = "TI Tsunami";
1318 srmmu_modtype = Tsunami;
David S. Miller5d83d662012-05-13 20:49:31 -07001319 sparc32_cachetlb_ops = &tsunami_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320 poke_srmmu = poke_tsunami;
1321
1322 tsunami_setup_blockops();
1323}
1324
Al Viro409832f2008-11-22 17:33:54 +00001325static void __cpuinit poke_viking(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326{
1327 unsigned long mreg = srmmu_get_mmureg();
1328 static int smp_catch;
1329
David S. Miller5d83d662012-05-13 20:49:31 -07001330 if (viking_mxcc_present) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001331 unsigned long mxcc_control = mxcc_get_creg();
1332
1333 mxcc_control |= (MXCC_CTL_ECE | MXCC_CTL_PRE | MXCC_CTL_MCE);
1334 mxcc_control &= ~(MXCC_CTL_RRC);
1335 mxcc_set_creg(mxcc_control);
1336
1337 /*
1338 * We don't need memory parity checks.
1339 * XXX This is a mess, have to dig out later. ecd.
1340 viking_mxcc_turn_off_parity(&mreg, &mxcc_control);
1341 */
1342
1343 /* We do cache ptables on MXCC. */
1344 mreg |= VIKING_TCENABLE;
1345 } else {
1346 unsigned long bpreg;
1347
1348 mreg &= ~(VIKING_TCENABLE);
Sam Ravnborg605ae962012-07-26 11:02:13 +00001349 if (smp_catch++) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 /* Must disable mixed-cmd mode here for other cpu's. */
1351 bpreg = viking_get_bpreg();
1352 bpreg &= ~(VIKING_ACTION_MIX);
1353 viking_set_bpreg(bpreg);
1354
1355 /* Just in case PROM does something funny. */
1356 msi_set_sync();
1357 }
1358 }
1359
1360 mreg |= VIKING_SPENABLE;
1361 mreg |= (VIKING_ICENABLE | VIKING_DCENABLE);
1362 mreg |= VIKING_SBENABLE;
1363 mreg &= ~(VIKING_ACENABLE);
1364 srmmu_set_mmureg(mreg);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001365}
1366
David S. Miller5d83d662012-05-13 20:49:31 -07001367static struct sparc32_cachetlb_ops viking_ops = {
1368 .cache_all = viking_flush_cache_all,
1369 .cache_mm = viking_flush_cache_mm,
1370 .cache_page = viking_flush_cache_page,
1371 .cache_range = viking_flush_cache_range,
1372 .tlb_all = viking_flush_tlb_all,
1373 .tlb_mm = viking_flush_tlb_mm,
1374 .tlb_page = viking_flush_tlb_page,
1375 .tlb_range = viking_flush_tlb_range,
1376 .page_to_ram = viking_flush_page_to_ram,
1377 .sig_insns = viking_flush_sig_insns,
1378 .page_for_dma = viking_flush_page_for_dma,
1379};
1380
1381#ifdef CONFIG_SMP
1382/* On sun4d the cpu broadcasts local TLB flushes, so we can just
1383 * perform the local TLB flush and all the other cpus will see it.
1384 * But, unfortunately, there is a bug in the sun4d XBUS backplane
1385 * that requires that we add some synchronization to these flushes.
1386 *
1387 * The bug is that the fifo which keeps track of all the pending TLB
1388 * broadcasts in the system is an entry or two too small, so if we
1389 * have too many going at once we'll overflow that fifo and lose a TLB
1390 * flush resulting in corruption.
1391 *
1392 * Our workaround is to take a global spinlock around the TLB flushes,
	1393	 * which guarantees we won't ever have too many pending.  It's a big
	1394	 * hammer, but a semaphore-like system to make sure we only have N TLB
	1395	 * flushes going at once would require SMP locking anyway, so there's
1396 * no real value in trying any harder than this.
1397 */
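/* The locking itself lives in the sun4dsmp_*() routines referenced
 * below; the C sketch that follows is purely illustrative (the
 * spinlock name is made up) and only shows the shape of the
 * workaround:
 *
 *	static DEFINE_SPINLOCK(sun4d_tlb_lock);
 *
 *	static void sun4dsmp_flush_tlb_all(void)
 *	{
 *		unsigned long flags;
 *
 *		spin_lock_irqsave(&sun4d_tlb_lock, flags);
 *		viking_flush_tlb_all();		(broadcast by the hardware)
 *		spin_unlock_irqrestore(&sun4d_tlb_lock, flags);
 *	}
 */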
1398static struct sparc32_cachetlb_ops viking_sun4d_smp_ops = {
1399 .cache_all = viking_flush_cache_all,
1400 .cache_mm = viking_flush_cache_mm,
1401 .cache_page = viking_flush_cache_page,
1402 .cache_range = viking_flush_cache_range,
1403 .tlb_all = sun4dsmp_flush_tlb_all,
1404 .tlb_mm = sun4dsmp_flush_tlb_mm,
1405 .tlb_page = sun4dsmp_flush_tlb_page,
1406 .tlb_range = sun4dsmp_flush_tlb_range,
1407 .page_to_ram = viking_flush_page_to_ram,
1408 .sig_insns = viking_flush_sig_insns,
1409 .page_for_dma = viking_flush_page_for_dma,
1410};
1411#endif
1412
Linus Torvalds1da177e2005-04-16 15:20:36 -07001413static void __init init_viking(void)
1414{
1415 unsigned long mreg = srmmu_get_mmureg();
1416
1417 /* Ahhh, the viking. SRMMU VLSI abortion number two... */
Sam Ravnborg605ae962012-07-26 11:02:13 +00001418 if (mreg & VIKING_MMODE) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001419 srmmu_name = "TI Viking";
1420 viking_mxcc_present = 0;
1421 msi_set_sync();
1422
Linus Torvalds1da177e2005-04-16 15:20:36 -07001423 /*
1424 * We need this to make sure old viking takes no hits
	1425	 * on its cache for dma snoops to work around the
1426 * "load from non-cacheable memory" interrupt bug.
1427 * This is only necessary because of the new way in
1428 * which we use the IOMMU.
1429 */
David S. Miller5d83d662012-05-13 20:49:31 -07001430 viking_ops.page_for_dma = viking_flush_page;
1431#ifdef CONFIG_SMP
1432 viking_sun4d_smp_ops.page_for_dma = viking_flush_page;
1433#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001434 flush_page_for_dma_global = 0;
1435 } else {
1436 srmmu_name = "TI Viking/MXCC";
1437 viking_mxcc_present = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001438 srmmu_cache_pagetables = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001439 }
1440
David S. Miller5d83d662012-05-13 20:49:31 -07001441 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1442 &viking_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443#ifdef CONFIG_SMP
David S. Miller5d83d662012-05-13 20:49:31 -07001444 if (sparc_cpu_model == sun4d)
1445 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1446 &viking_sun4d_smp_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001447#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448
1449 poke_srmmu = poke_viking;
1450}
1451
1452/* Probe for the srmmu chip version. */
1453static void __init get_srmmu_type(void)
1454{
1455 unsigned long mreg, psr;
1456 unsigned long mod_typ, mod_rev, psr_typ, psr_vers;
1457
1458 srmmu_modtype = SRMMU_INVAL_MOD;
1459 hwbug_bitmask = 0;
1460
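	/* Both the MMU control register and the PSR keep their
	 * identification in the top byte: bits 31:28 hold the
	 * implementation, bits 27:24 the version/revision.
	 */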
	1461	mreg = srmmu_get_mmureg();
		psr = get_psr();
1462 mod_typ = (mreg & 0xf0000000) >> 28;
1463 mod_rev = (mreg & 0x0f000000) >> 24;
1464 psr_typ = (psr >> 28) & 0xf;
1465 psr_vers = (psr >> 24) & 0xf;
1466
Konrad Eisele75d9e342009-08-17 00:13:33 +00001467 /* First, check for sparc-leon. */
1468 if (sparc_cpu_model == sparc_leon) {
Konrad Eisele75d9e342009-08-17 00:13:33 +00001469 init_leon();
1470 return;
1471 }
1472
1473 /* Second, check for HyperSparc or Cypress. */
Sam Ravnborg605ae962012-07-26 11:02:13 +00001474 if (mod_typ == 1) {
1475 switch (mod_rev) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001476 case 7:
1477 /* UP or MP Hypersparc */
1478 init_hypersparc();
1479 break;
1480 case 0:
1481 case 2:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001482 case 10:
1483 case 11:
1484 case 12:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001485 case 13:
1486 case 14:
1487 case 15:
Linus Torvalds1da177e2005-04-16 15:20:36 -07001488 default:
David S. Millerc7020eb2012-05-14 22:02:08 -07001489			prom_printf("Sparc-Linux Cypress support no longer exists.\n");
1490 prom_halt();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001491 break;
Joe Perches6cb79b32011-06-03 14:45:23 +00001492 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001493 return;
1494 }
Sam Ravnborg605ae962012-07-26 11:02:13 +00001495
	1496	/* Now the Fujitsu TurboSparc.  It might be running
Linus Torvalds1da177e2005-04-16 15:20:36 -07001497	 * in Swift emulation mode, so we check for that again below.
1498 */
1499 if (psr_typ == 0 && psr_vers == 5) {
1500 init_turbosparc();
1501 return;
1502 }
1503
1504 /* Next check for Fujitsu Swift. */
Sam Ravnborg605ae962012-07-26 11:02:13 +00001505 if (psr_typ == 0 && psr_vers == 4) {
Andres Salomon8d125562010-10-08 14:18:11 -07001506 phandle cpunode;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001507 char node_str[128];
1508
	1509		/* Check whether this is really a TurboSparc emulating a Swift... */
1510 cpunode = prom_getchild(prom_root_node);
Sam Ravnborg605ae962012-07-26 11:02:13 +00001511 while ((cpunode = prom_getsibling(cpunode)) != 0) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001512 prom_getstring(cpunode, "device_type", node_str, sizeof(node_str));
Sam Ravnborg605ae962012-07-26 11:02:13 +00001513 if (!strcmp(node_str, "cpu")) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 if (!prom_getintdefault(cpunode, "psr-implementation", 1) &&
1515 prom_getintdefault(cpunode, "psr-version", 1) == 5) {
1516 init_turbosparc();
1517 return;
1518 }
1519 break;
1520 }
1521 }
Sam Ravnborg605ae962012-07-26 11:02:13 +00001522
Linus Torvalds1da177e2005-04-16 15:20:36 -07001523 init_swift();
1524 return;
1525 }
1526
1527 /* Now the Viking family of srmmu. */
Sam Ravnborg605ae962012-07-26 11:02:13 +00001528 if (psr_typ == 4 &&
Linus Torvalds1da177e2005-04-16 15:20:36 -07001529 ((psr_vers == 0) ||
1530 ((psr_vers == 1) && (mod_typ == 0) && (mod_rev == 0)))) {
1531 init_viking();
1532 return;
1533 }
1534
1535 /* Finally the Tsunami. */
Sam Ravnborg605ae962012-07-26 11:02:13 +00001536 if (psr_typ == 4 && psr_vers == 1 && (mod_typ || mod_rev)) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001537 init_tsunami();
1538 return;
1539 }
1540
1541 /* Oh well */
1542 srmmu_is_bad();
1543}
1544
Linus Torvalds1da177e2005-04-16 15:20:36 -07001545#ifdef CONFIG_SMP
	1546/* SMP cross-call wrappers: xcN() runs the given local op, with N arguments, on all other cpus. */
1547static void smp_flush_page_for_dma(unsigned long page)
1548{
David S. Miller5d83d662012-05-13 20:49:31 -07001549 xc1((smpfunc_t) local_ops->page_for_dma, page);
1550 local_ops->page_for_dma(page);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001551}
1552
David S. Miller5d83d662012-05-13 20:49:31 -07001553static void smp_flush_cache_all(void)
1554{
1555 xc0((smpfunc_t) local_ops->cache_all);
1556 local_ops->cache_all();
1557}
1558
1559static void smp_flush_tlb_all(void)
1560{
1561 xc0((smpfunc_t) local_ops->tlb_all);
1562 local_ops->tlb_all();
1563}
1564
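/* The mm/vma scoped flushes below all follow the same pattern:
 * cross-call every other cpu that has this mm in its mm_cpumask,
 * then do the flush locally.  If no other cpu ever ran the mm,
 * no IPI is sent at all.
 */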
1565static void smp_flush_cache_mm(struct mm_struct *mm)
1566{
1567 if (mm->context != NO_CONTEXT) {
1568 cpumask_t cpu_mask;
1569 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1570 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1571 if (!cpumask_empty(&cpu_mask))
1572 xc1((smpfunc_t) local_ops->cache_mm, (unsigned long) mm);
1573 local_ops->cache_mm(mm);
1574 }
1575}
1576
1577static void smp_flush_tlb_mm(struct mm_struct *mm)
1578{
1579 if (mm->context != NO_CONTEXT) {
1580 cpumask_t cpu_mask;
1581 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1582 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1583 if (!cpumask_empty(&cpu_mask)) {
1584 xc1((smpfunc_t) local_ops->tlb_mm, (unsigned long) mm);
1585 if (atomic_read(&mm->mm_users) == 1 && current->active_mm == mm)
1586 cpumask_copy(mm_cpumask(mm),
1587 cpumask_of(smp_processor_id()));
1588 }
1589 local_ops->tlb_mm(mm);
1590 }
1591}
1592
1593static void smp_flush_cache_range(struct vm_area_struct *vma,
1594 unsigned long start,
1595 unsigned long end)
1596{
1597 struct mm_struct *mm = vma->vm_mm;
1598
1599 if (mm->context != NO_CONTEXT) {
1600 cpumask_t cpu_mask;
1601 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1602 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1603 if (!cpumask_empty(&cpu_mask))
1604 xc3((smpfunc_t) local_ops->cache_range,
1605 (unsigned long) vma, start, end);
1606 local_ops->cache_range(vma, start, end);
1607 }
1608}
1609
1610static void smp_flush_tlb_range(struct vm_area_struct *vma,
1611 unsigned long start,
1612 unsigned long end)
1613{
1614 struct mm_struct *mm = vma->vm_mm;
1615
1616 if (mm->context != NO_CONTEXT) {
1617 cpumask_t cpu_mask;
1618 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1619 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1620 if (!cpumask_empty(&cpu_mask))
1621 xc3((smpfunc_t) local_ops->tlb_range,
1622 (unsigned long) vma, start, end);
1623 local_ops->tlb_range(vma, start, end);
1624 }
1625}
1626
1627static void smp_flush_cache_page(struct vm_area_struct *vma, unsigned long page)
1628{
1629 struct mm_struct *mm = vma->vm_mm;
1630
1631 if (mm->context != NO_CONTEXT) {
1632 cpumask_t cpu_mask;
1633 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1634 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1635 if (!cpumask_empty(&cpu_mask))
1636 xc2((smpfunc_t) local_ops->cache_page,
1637 (unsigned long) vma, page);
1638 local_ops->cache_page(vma, page);
1639 }
1640}
1641
1642static void smp_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
1643{
1644 struct mm_struct *mm = vma->vm_mm;
1645
1646 if (mm->context != NO_CONTEXT) {
1647 cpumask_t cpu_mask;
1648 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1649 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1650 if (!cpumask_empty(&cpu_mask))
1651 xc2((smpfunc_t) local_ops->tlb_page,
1652 (unsigned long) vma, page);
1653 local_ops->tlb_page(vma, page);
1654 }
1655}
1656
1657static void smp_flush_page_to_ram(unsigned long page)
1658{
	1659	/* The current theory is that callers are the ones who have just
	1660	 * dirtied their cache with the page's contents in kernel space,
	1661	 * therefore we only need to run this on the local cpu.
1662 *
1663 * XXX This experiment failed, research further... -DaveM
1664 */
1665#if 1
1666 xc1((smpfunc_t) local_ops->page_to_ram, page);
1667#endif
1668 local_ops->page_to_ram(page);
1669}
1670
1671static void smp_flush_sig_insns(struct mm_struct *mm, unsigned long insn_addr)
1672{
1673 cpumask_t cpu_mask;
1674 cpumask_copy(&cpu_mask, mm_cpumask(mm));
1675 cpumask_clear_cpu(smp_processor_id(), &cpu_mask);
1676 if (!cpumask_empty(&cpu_mask))
1677 xc2((smpfunc_t) local_ops->sig_insns,
1678 (unsigned long) mm, insn_addr);
1679 local_ops->sig_insns(mm, insn_addr);
1680}
1681
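/* The cross-call wrappers installed on SMP.  load_mmu() may patch
 * individual entries back to the plain local ops where the hardware
 * makes a cross-call unnecessary.
 */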
1682static struct sparc32_cachetlb_ops smp_cachetlb_ops = {
1683 .cache_all = smp_flush_cache_all,
1684 .cache_mm = smp_flush_cache_mm,
1685 .cache_page = smp_flush_cache_page,
1686 .cache_range = smp_flush_cache_range,
1687 .tlb_all = smp_flush_tlb_all,
1688 .tlb_mm = smp_flush_tlb_mm,
1689 .tlb_page = smp_flush_tlb_page,
1690 .tlb_range = smp_flush_tlb_range,
1691 .page_to_ram = smp_flush_page_to_ram,
1692 .sig_insns = smp_flush_sig_insns,
1693 .page_for_dma = smp_flush_page_for_dma,
1694};
Linus Torvalds1da177e2005-04-16 15:20:36 -07001695#endif
1696
Linus Torvalds1da177e2005-04-16 15:20:36 -07001697/* Load up routines and constants for sun4m and sun4d mmu */
Sam Ravnborga3c5c662012-05-12 20:35:52 +02001698void __init load_mmu(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699{
1700 extern void ld_mmu_iommu(void);
1701 extern void ld_mmu_iounit(void);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001702
Linus Torvalds1da177e2005-04-16 15:20:36 -07001703 /* Functions */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001704 get_srmmu_type();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705
1706#ifdef CONFIG_SMP
1707 /* El switcheroo... */
David S. Miller5d83d662012-05-13 20:49:31 -07001708 local_ops = sparc32_cachetlb_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709
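	/* On sun4d and LEON the local TLB flush routines are already
	 * SMP safe, so the cross-call wrappers are not needed for them.
	 */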
David S. Miller5d83d662012-05-13 20:49:31 -07001710 if (sparc_cpu_model == sun4d || sparc_cpu_model == sparc_leon) {
1711 smp_cachetlb_ops.tlb_all = local_ops->tlb_all;
1712 smp_cachetlb_ops.tlb_mm = local_ops->tlb_mm;
1713 smp_cachetlb_ops.tlb_range = local_ops->tlb_range;
1714 smp_cachetlb_ops.tlb_page = local_ops->tlb_page;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001715 }
David S. Miller64273d02008-11-26 01:00:58 -08001716
1717 if (poke_srmmu == poke_viking) {
1718 /* Avoid unnecessary cross calls. */
David S. Miller5d83d662012-05-13 20:49:31 -07001719 smp_cachetlb_ops.cache_all = local_ops->cache_all;
1720 smp_cachetlb_ops.cache_mm = local_ops->cache_mm;
1721 smp_cachetlb_ops.cache_range = local_ops->cache_range;
1722 smp_cachetlb_ops.cache_page = local_ops->cache_page;
1723
1724 smp_cachetlb_ops.page_to_ram = local_ops->page_to_ram;
1725 smp_cachetlb_ops.sig_insns = local_ops->sig_insns;
1726 smp_cachetlb_ops.page_for_dma = local_ops->page_for_dma;
David S. Miller64273d02008-11-26 01:00:58 -08001727 }
David S. Miller5d83d662012-05-13 20:49:31 -07001728
1729 /* It really is const after this point. */
1730 sparc32_cachetlb_ops = (const struct sparc32_cachetlb_ops *)
1731 &smp_cachetlb_ops;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001732#endif
1733
1734 if (sparc_cpu_model == sun4d)
1735 ld_mmu_iounit();
1736 else
1737 ld_mmu_iommu();
1738#ifdef CONFIG_SMP
1739 if (sparc_cpu_model == sun4d)
1740 sun4d_init_smp();
Konrad Eisele84017072009-08-31 22:08:13 +00001741 else if (sparc_cpu_model == sparc_leon)
1742 leon_init_smp();
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743 else
1744 sun4m_init_smp();
1745#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746}