/*
** Tablewalk MMU emulator
**
** by Toshiyasu Morita
**
** Started 1/16/98 @ 2:22 am
*/

#include <linux/init.h>
#include <linux/mman.h>
#include <linux/mm.h>
#include <linux/kernel.h>
#include <linux/ptrace.h>
#include <linux/delay.h>
#include <linux/bootmem.h>
#include <linux/bitops.h>
#include <linux/module.h>

#include <asm/setup.h>
#include <asm/traps.h>
#include <asm/uaccess.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/sun3mmu.h>
#include <asm/segment.h>
#include <asm/oplib.h>
#include <asm/mmu_context.h>
#include <asm/dvma.h>


#undef DEBUG_MMU_EMU
#define DEBUG_PROM_MAPS

/*
** Defines
*/

#define CONTEXTS_NUM		8
#define SEGMAPS_PER_CONTEXT_NUM	2048
#define PAGES_PER_SEGMENT	16
#define PMEGS_NUM		256
#define PMEG_MASK		0xFF
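
/* Geometry implied by the constants above: each of the 8 hardware
   contexts has 2048 segmap entries, and each segment maps 16 pages
   through one of 256 PMEGs (page map entry groups).  With the sun3's
   8K pages that covers the full 0x10000000 (256MB) virtual space. */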

/*
** Globals
*/

unsigned long m68k_vmalloc_end;
EXPORT_SYMBOL(m68k_vmalloc_end);

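/* Software bookkeeping for the hardware PMEGs.  pmeg_alloc holds each
   PMEG's allocation state: 0 = free, 1 = allocated to a user context
   (reclaimable), 2 = permanently reserved (kernel, boot and PROM
   mappings).  pmeg_vaddr and pmeg_ctx record which virtual segment and
   which context a PMEG currently serves. */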
unsigned long pmeg_vaddr[PMEGS_NUM];
unsigned char pmeg_alloc[PMEGS_NUM];
unsigned char pmeg_ctx[PMEGS_NUM];

/* pointers to the mm structs for each task in each
   context. 0xffffffff is a marker for kernel context */
static struct mm_struct *ctx_alloc[CONTEXTS_NUM] = {
	[0] = (struct mm_struct *)0xffffffff
};

/* number of contexts currently available for allocation; bumped back
   up when a context's mm is dropped in clear_context() */
static unsigned char ctx_avail = CONTEXTS_NUM-1;

/* array of pages to be marked off for the rom when we do mem_init later */
/* 256 pages lets the rom take up to 2mb of physical ram.. I really
   hope it never wants more than that. */
unsigned long rom_pages[256];

/* Print a PTE value in symbolic form. For debugging. */
void print_pte (pte_t pte)
{
#if 0
	/* Verbose version. */
	unsigned long val = pte_val (pte);
	printk (" pte=%lx [addr=%lx",
		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT);
	if (val & SUN3_PAGE_VALID)	printk (" valid");
	if (val & SUN3_PAGE_WRITEABLE)	printk (" write");
	if (val & SUN3_PAGE_SYSTEM)	printk (" sys");
	if (val & SUN3_PAGE_NOCACHE)	printk (" nocache");
	if (val & SUN3_PAGE_ACCESSED)	printk (" accessed");
	if (val & SUN3_PAGE_MODIFIED)	printk (" modified");
	switch (val & SUN3_PAGE_TYPE_MASK) {
		case SUN3_PAGE_TYPE_MEMORY: printk (" memory"); break;
		case SUN3_PAGE_TYPE_IO:     printk (" io");     break;
		case SUN3_PAGE_TYPE_VME16:  printk (" vme16");  break;
		case SUN3_PAGE_TYPE_VME32:  printk (" vme32");  break;
	}
	printk ("]\n");
#else
	/* Terse version. More likely to fit on a line. */
	unsigned long val = pte_val (pte);
	char flags[7], *type;

	flags[0] = (val & SUN3_PAGE_VALID)     ? 'v' : '-';
	flags[1] = (val & SUN3_PAGE_WRITEABLE) ? 'w' : '-';
	flags[2] = (val & SUN3_PAGE_SYSTEM)    ? 's' : '-';
	flags[3] = (val & SUN3_PAGE_NOCACHE)   ? 'x' : '-';
	flags[4] = (val & SUN3_PAGE_ACCESSED)  ? 'a' : '-';
	flags[5] = (val & SUN3_PAGE_MODIFIED)  ? 'm' : '-';
	flags[6] = '\0';

	switch (val & SUN3_PAGE_TYPE_MASK) {
		case SUN3_PAGE_TYPE_MEMORY: type = "memory"; break;
		case SUN3_PAGE_TYPE_IO:     type = "io"    ; break;
		case SUN3_PAGE_TYPE_VME16:  type = "vme16" ; break;
		case SUN3_PAGE_TYPE_VME32:  type = "vme32" ; break;
		default: type = "unknown?"; break;
	}

	printk (" pte=%08lx [%07lx %s %s]\n",
		val, (val & SUN3_PAGE_PGNUM_MASK) << PAGE_SHIFT, flags, type);
#endif
}

/* Print the PTE value for a given virtual address. For debugging. */
void print_pte_vaddr (unsigned long vaddr)
{
	printk (" vaddr=%lx [%02lx]", vaddr, sun3_get_segmap (vaddr));
	print_pte (__pte (sun3_get_pte (vaddr)));
}

/*
 * Initialise the MMU emulator.
 */
void __init mmu_emu_init(unsigned long bootmem_end)
{
	unsigned long seg, num;
	int i,j;

	memset(rom_pages, 0, sizeof(rom_pages));
	memset(pmeg_vaddr, 0, sizeof(pmeg_vaddr));
	memset(pmeg_alloc, 0, sizeof(pmeg_alloc));
	memset(pmeg_ctx, 0, sizeof(pmeg_ctx));

	/* PMEG-align the end of bootmem, adding one more pmeg;
	 * later bootmem allocations will likely need it */
	bootmem_end = (bootmem_end + (2 * SUN3_PMEG_SIZE)) & ~SUN3_PMEG_MASK;

	/* mark all of the pmegs used thus far as reserved */
	for (i=0; i < __pa(bootmem_end) / SUN3_PMEG_SIZE ; ++i)
		pmeg_alloc[i] = 2;


	/* I'm thinking that most of the top pmegs are going to be
	   used for something, and we probably shouldn't risk it */
	for(num = 0xf0; num <= 0xff; num++)
		pmeg_alloc[num] = 2;

	/* liberate all existing mappings in the rest of kernel space */
	for(seg = bootmem_end; seg < 0x0f800000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);

		if(!pmeg_alloc[i]) {
#ifdef DEBUG_MMU_EMU
			printk("freed: ");
			print_pte_vaddr (seg);
#endif
			sun3_put_segmap(seg, SUN3_INVALID_PMEG);
		}
	}

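	/* Scan the top of the address space (0x0f800000-0x10000000), where
	   the PROM keeps its own mappings; those PMEGs stay reserved, and
	   the lowest PROM-mapped segment becomes the end of the vmalloc
	   region. */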
	j = 0;
	for (num=0, seg=0x0F800000; seg<0x10000000; seg+=16*PAGE_SIZE) {
		if (sun3_get_segmap (seg) != SUN3_INVALID_PMEG) {
#ifdef DEBUG_PROM_MAPS
			for(i = 0; i < 16; i++) {
				printk ("mapped:");
				print_pte_vaddr (seg + (i*PAGE_SIZE));
				break;
			}
#endif
			// the lowest mapping here is the end of our
			// vmalloc region
			if (!m68k_vmalloc_end)
				m68k_vmalloc_end = seg;

			// mark the segmap alloc'd, and reserve any
			// of the first 0xbff pages the hardware is
			// already using...  does any sun3 support > 24mb?
			pmeg_alloc[sun3_get_segmap(seg)] = 2;
		}
	}

	dvma_init();


	/* blank everything below the kernel, and we've got the base
	   mapping to start all the contexts off with... */
	for(seg = 0; seg < PAGE_OFFSET; seg += SUN3_PMEG_SIZE)
		sun3_put_segmap(seg, SUN3_INVALID_PMEG);

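	/* Copy the kernel's segmap entries into every other context via
	   the PROM's pv_setctxt service, so each context starts out with
	   the kernel already mapped. */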
	set_fs(MAKE_MM_SEG(3));
	for(seg = 0; seg < 0x10000000; seg += SUN3_PMEG_SIZE) {
		i = sun3_get_segmap(seg);
		for(j = 1; j < CONTEXTS_NUM; j++)
			(*(romvec->pv_setctxt))(j, (void *)seg, i);
	}
	set_fs(KERNEL_DS);

}

/* erase the mappings for a dead context.  Uses the pg_dir for hints
   as the pmeg tables proved somewhat unreliable, and unmapping all of
   TASK_SIZE was much slower and no more stable. */
/* todo: find a better way to keep track of the pmegs used by a
   context for when they're cleared */
void clear_context(unsigned long context)
{
	unsigned char oldctx;
	unsigned long i;

	if(context) {
		if(!ctx_alloc[context])
			panic("clear_context: context not allocated\n");

		ctx_alloc[context]->context = SUN3_INVALID_CONTEXT;
		ctx_alloc[context] = (struct mm_struct *)0;
		ctx_avail++;
	}

	oldctx = sun3_get_context();

	sun3_put_context(context);

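	/* Release every pmeg this context owns; only state-1 (user)
	   allocations are freed here, boot-reserved pmegs (state 2)
	   are left alone. */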
	for(i = 0; i < SUN3_INVALID_PMEG; i++) {
		if((pmeg_ctx[i] == context) && (pmeg_alloc[i] == 1)) {
			sun3_put_segmap(pmeg_vaddr[i], SUN3_INVALID_PMEG);
			pmeg_ctx[i] = 0;
			pmeg_alloc[i] = 0;
			pmeg_vaddr[i] = 0;
		}
	}

	sun3_put_context(oldctx);
}

/* gets an empty context.  if full, kills the next context listed to
   die first */
/* This context invalidation scheme is, well, totally arbitrary, I'm
   sure it could be much more intelligent...  but it gets the job done
   for now without much overhead in making its decision. */
/* todo: come up with optimized scheme for flushing contexts */
unsigned long get_free_context(struct mm_struct *mm)
{
	unsigned long new = 1;
	static unsigned char next_to_die = 1;

	if(!ctx_avail) {
		/* kill someone to get our context */
		new = next_to_die;
		clear_context(new);
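		/* advance the round-robin pointer, skipping context 0,
		   which permanently belongs to the kernel */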
		next_to_die = (next_to_die + 1) & 0x7;
		if(!next_to_die)
			next_to_die++;
	} else {
		while(new < CONTEXTS_NUM) {
			if(ctx_alloc[new])
				new++;
			else
				break;
		}
		// check to make sure one was really free...
		if(new == CONTEXTS_NUM)
			panic("get_free_context: failed to find free context");
	}

	ctx_alloc[new] = mm;
	ctx_avail--;

	return new;
}

/*
 * Dynamically select a `spare' PMEG and use it to map virtual `vaddr' in
 * `context'.  Maintain internal PMEG management structures.  This doesn't
 * actually map the physical address, but does clear the old mappings.
 */
//todo: better allocation scheme? but is extra complexity worthwhile?
//todo: only clear old entries if necessary? how to tell?

inline void mmu_emu_map_pmeg (int context, int vaddr)
{
	static unsigned char curr_pmeg = 128;
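	/* curr_pmeg rotates through the PMEG table; being an unsigned char
	   it wraps at 256 (== PMEGS_NUM) automatically.  Starting at 128
	   presumably keeps it clear of the low pmegs reserved at boot. */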
	int i;

	/* Round address to PMEG boundary. */
	vaddr &= ~SUN3_PMEG_MASK;

	/* Find a spare one. */
	while (pmeg_alloc[curr_pmeg] == 2)
		++curr_pmeg;


#ifdef DEBUG_MMU_EMU
	printk("mmu_emu_map_pmeg: pmeg %x to context %d vaddr %x\n",
	       curr_pmeg, context, vaddr);
#endif

	/* Invalidate old mapping for the pmeg, if any */
	if (pmeg_alloc[curr_pmeg] == 1) {
		sun3_put_context(pmeg_ctx[curr_pmeg]);
		sun3_put_segmap (pmeg_vaddr[curr_pmeg], SUN3_INVALID_PMEG);
		sun3_put_context(context);
	}

	/* Update PMEG management structures. */
	// don't take pmegs away from the kernel...
	if(vaddr >= PAGE_OFFSET) {
		/* map kernel pmegs into all contexts */
		unsigned char i;

		for(i = 0; i < CONTEXTS_NUM; i++) {
			sun3_put_context(i);
			sun3_put_segmap (vaddr, curr_pmeg);
		}
		sun3_put_context(context);
		pmeg_alloc[curr_pmeg] = 2;
		pmeg_ctx[curr_pmeg] = 0;

	}
	else {
		pmeg_alloc[curr_pmeg] = 1;
		pmeg_ctx[curr_pmeg] = context;
		sun3_put_segmap (vaddr, curr_pmeg);

	}
	pmeg_vaddr[curr_pmeg] = vaddr;

	/* Set hardware mapping and clear the old PTE entries. */
	for (i=0; i<SUN3_PMEG_SIZE; i+=SUN3_PTE_SIZE)
		sun3_put_pte (vaddr + i, SUN3_PAGE_SYSTEM);

	/* Consider a different one next time. */
	++curr_pmeg;
}

/*
 * Handle a pagefault at virtual address `vaddr'; check if there should be a
 * page there (specifically, whether the software pagetables indicate that
 * there is).  This is necessary due to the limited size of the second-level
 * Sun3 hardware pagetables (256 groups of 16 pages).  If there should be a
 * mapping present, we select a `spare' PMEG and use it to create a mapping.
 * `read_flag' is nonzero for a read fault; zero for a write.  Returns nonzero
 * if we successfully handled the fault.
 */
//todo: should we bump minor pagefault counter? if so, here or in caller?
//todo: possibly inline this into bus_error030 in <asm/buserror.h> ?

// kernel_fault is set when a kernel page couldn't be demand mapped,
// and forces another try using the kernel page table.  basically a
// hack so that vmalloc would work correctly.

int mmu_emu_handle_fault (unsigned long vaddr, int read_flag, int kernel_fault)
{
	unsigned long segment, offset;
	unsigned char context;
	pte_t *pte;
	pgd_t * crp;

	if(current->mm == NULL) {
		crp = swapper_pg_dir;
		context = 0;
	} else {
		context = current->mm->context;
		if(kernel_fault)
			crp = swapper_pg_dir;
		else
			crp = current->mm->pgd;
	}

#ifdef DEBUG_MMU_EMU
	printk ("mmu_emu_handle_fault: vaddr=%lx type=%s crp=%p\n",
		vaddr, read_flag ? "read" : "write", crp);
#endif

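	/* Split the fault address into a segmap index (one of the 2048
	   per-context segmap entries) and the page offset within that
	   16-page segment. */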
	segment = (vaddr >> SUN3_PMEG_SIZE_BITS) & 0x7FF;
	offset = (vaddr >> SUN3_PTE_SIZE_BITS) & 0xF;

#ifdef DEBUG_MMU_EMU
	printk ("mmu_emu_handle_fault: segment=%lx offset=%lx\n", segment, offset);
#endif

	pte = (pte_t *) pgd_val (*(crp + segment));

//todo: next line should check for valid pmd properly.
	if (!pte) {
//		printk ("mmu_emu_handle_fault: invalid pmd\n");
		return 0;
	}

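	/* The pgd entry holds the physical address of the pte table; index
	   it by the page offset, then convert to a virtual address before
	   dereferencing. */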
	pte = (pte_t *) __va ((unsigned long)(pte + offset));

	/* Make sure this is a valid page */
	if (!(pte_val (*pte) & SUN3_PAGE_VALID))
		return 0;

	/* Make sure there's a pmeg allocated for the page */
	if (sun3_get_segmap (vaddr&~SUN3_PMEG_MASK) == SUN3_INVALID_PMEG)
		mmu_emu_map_pmeg (context, vaddr);

	/* Write the pte value to hardware MMU */
	sun3_put_pte (vaddr&PAGE_MASK, pte_val (*pte));

	/* Update software copy of the pte value */
// I'm not sure this is necessary.  If this is required, we ought to simply
// copy this out when we reuse the PMEG or at some other convenient time.
// Doing it here is fairly meaningless, anyway, as we only know about the
// first access to a given page.  --m
	if (!read_flag) {
		if (pte_val (*pte) & SUN3_PAGE_WRITEABLE)
			pte_val (*pte) |= (SUN3_PAGE_ACCESSED
					   | SUN3_PAGE_MODIFIED);
		else
			return 0;	/* Write-protect error. */
	} else
		pte_val (*pte) |= SUN3_PAGE_ACCESSED;

#ifdef DEBUG_MMU_EMU
	printk ("seg:%d crp:%p ->", get_fs().seg, crp);
	print_pte_vaddr (vaddr);
	printk ("\n");
#endif

	return 1;
}