/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/config.h>
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/mm.h>

#include <asm/cpu.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/system.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
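
/*
 * Worked example (assuming 4kB pages, i.e. PAGE_SHIFT == 12): each index
 * is shifted left by 13, so entry 0 yields CKSEG0, entry 1 CKSEG0 + 0x2000,
 * entry 2 CKSEG0 + 0x4000, and so on -- one distinct VPN2 per TLB index,
 * all in the unmapped CKSEG0 region, so invalidated entries can never
 * match a lookup or conflict with each other.
 */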

/* CP0 hazard avoidance. */
#define BARRIER __asm__ __volatile__(".set noreorder\n\t" \
	"nop; nop; nop; nop; nop; nop;\n\t" \
	".set reorder\n\t")

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags); \
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
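
/*
 * Note that the SMTC variants open and close a C block and declare
 * mvpflags inside it, so ENTER_CRITICAL()/EXIT_CRITICAL() must always
 * appear as a lexically matched pair within a single scope:
 *
 *	ENTER_CRITICAL(flags);
 *	... TLB registers are safe to touch here ...
 *	EXIT_CRITICAL(flags);
 */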

void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	EXIT_CRITICAL(flags);
}

/* All entries common to a mm share an asid.  To effectively flush
   these entries, we just bump the asid. */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();

	cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		drop_mmu_context(mm, cpu);
	}

	preempt_enable();
}

void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long flags;
		int size;

		ENTER_CRITICAL(flags);
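		/*
		 * An R4k-style TLB entry maps an even/odd pair of virtual
		 * pages through EntryLo0/EntryLo1, so the number of entries
		 * to probe is half the page count, rounded up.
		 */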
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize / 2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				BARRIER;
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			drop_mmu_context(mm, cpu);
		}
		EXIT_CRITICAL(flags);
	}
}

void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long flags;
	int size;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			BARRIER;
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	EXIT_CRITICAL(flags);
}

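/*
 * Single-page flush: probe the TLB for the page's VPN2 under the mm's
 * ASID; on a hit, overwrite the matched slot with a unique dummy VPN2
 * and zeroed EntryLo values to invalidate it.
 */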
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		BARRIER;
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		EXIT_CRITICAL(flags);
	}
}

/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);

	EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debuggee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);

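	/*
	 * EntryLo format note: with 64-bit physical addresses on a MIPS32r1
	 * core the hardware half of the pte lives in pte_high and can be
	 * written out directly; otherwise the low six software bits of the
	 * pte are shifted away so the hardware sees the PFN and the
	 * C/D/V/G flag bits in their architected EntryLo positions.
	 */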
#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32_R1)
	write_c0_entrylo0(ptep->pte_high);
	ptep++;
	write_c0_entrylo1(ptep->pte_high);
#else
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	EXIT_CRITICAL(flags);
}

#if 0
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	ENTER_CRITICAL(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	BARRIER;
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	EXIT_CRITICAL(flags);
}
#endif

void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	BARRIER;
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	BARRIER;
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}
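
/*
 * Hypothetical usage sketch (names and addresses invented for
 * illustration): a board-setup function might pin a pair of 16MB pages
 * covering a device window into the next free wired slot:
 *
 *	add_wired_entry(ENTRYLO(0x1c000000), ENTRYLO(0x1d000000),
 *			0xe4000000UL, PM_16M);
 *
 * where ENTRYLO() would be a helper packing a physical address plus the
 * cache/dirty/valid/global bits into EntryLo format.  The mapping
 * survives local_flush_tlb_all(), since entries below c0_wired are
 * never touched by the flush loop above.
 */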

/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

static int temp_tlb_entry __initdata;
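
/*
 * temp_tlb_entry is initialised in tlb_init() to the highest TLB index
 * and counts downwards; a request fails once it would collide with the
 * wired entries growing up from index 0.
 */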

__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
			       unsigned long entryhi, unsigned long pagemask)
{
	int ret = 0;
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	if (--temp_tlb_entry < wired) {
		printk(KERN_WARNING
		       "No TLB space left for add_temporary_entry\n");
		ret = -ENOSPC;
		goto out;
	}

	write_c0_index(temp_tlb_entry);
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	write_c0_pagemask(old_pagemask);
out:
	EXIT_CRITICAL(flags);
	return ret;
}

extern void __init sanitize_tlb_entries(void);
static void __init probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU, the Config 1
	 * register is not supported and we assume R4k style.  CPU probing
	 * has already figured out the number of TLB entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If the TLB is shared in an SMTC system, the total size has
	 * already been calculated and written into cpu_data tlbsize.
	 */
	if ((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
		return;
#endif /* CONFIG_MIPS_MT_SMTC */

	reg = read_c0_config1();
	if (!((config >> 7) & 3))
		panic("No TLB present");

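	/* The MMUSize field of Config1 (bits 30:25) holds the TLB size minus one. */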
	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
444
Ralf Baechle41c594a2006-04-05 09:45:45 +0100445static int __initdata ntlb = 0;
446static int __init set_ntlb(char *str)
447{
448 get_option(&str, &ntlb);
449 return 1;
450}
451
452__setup("ntlb=", set_ntlb);
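
/*
 * Booting with e.g. "ntlb=16" on the kernel command line restricts the
 * TLB to 16 entries by marking the rest wired in tlb_init() below --
 * handy when hunting TLB-replacement bugs.
 */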

void __init tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set for 4kb pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	write_c0_framemask(0);
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead. */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS? */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired - 1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}