/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (dm@engr.sgi.com)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <linux/init.h>
12#include <linux/sched.h>
13#include <linux/mm.h>
David Daneyfd062c82009-05-27 17:47:44 -070014#include <linux/hugetlb.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070015
16#include <asm/cpu.h>
17#include <asm/bootinfo.h>
18#include <asm/mmu_context.h>
19#include <asm/pgtable.h>
20#include <asm/system.h>
21
22extern void build_tlb_refill_handler(void);
23
/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 *
 * Builds a per-index EntryHi value in CKSEG0 whose VPN2 can never
 * match a translated address, so invalidated entries parked on it
 * cannot cause duplicate-match machine checks.
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

/*
 * On SMTC the TLB may be shared by several thread contexts, so
 * masking local interrupts is not enough: ENTER_CRITICAL also
 * disables the other virtual processing elements via dvpe(), and
 * EXIT_CRITICAL re-enables them via evpe().  The macros open and
 * close a C block so the saved VPE state (mvpflags) spans both;
 * they must always be used as a strictly matched pair.
 */
#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

/* Without SMTC, disabling local interrupts is sufficient. */
#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
51
#if defined(CONFIG_CPU_LOONGSON2)
/*
 * LOONGSON2 has a 4 entry itlb which is a subset of dtlb,
 * unfortunately, itlb is not totally transparent to software.
 * Writing 4 to the diagnostic register flushes the ITLB so it
 * cannot retain stale instruction translations.
 */
#define FLUSH_ITLB write_c0_diag(4);

/* Flush the ITLB only when the VMA may contain executable pages. */
#define FLUSH_ITLB_VM(vma) { if ((vma)->vm_flags & VM_EXEC)  write_c0_diag(4); }

#else

/* Other CPUs keep the ITLB coherent automatically; nothing to do. */
#define FLUSH_ITLB
#define FLUSH_ITLB_VM(vma)

#endif
67
/*
 * Invalidate every non-wired TLB entry on the current CPU by
 * overwriting it with a unique, never-matching EntryHi and zeroed
 * EntryLo pair.  Wired entries (indices below c0_wired) are kept.
 * Runs with the critical-section macros held since it clobbers
 * EntryHi/EntryLo/Index.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Start past the wired entries; those must survive the flush. */
	entry = read_c0_wired();

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
96
/*
 * All entries belonging to an mm share a single ASID, so the whole
 * address space is flushed cheaply by just assigning the mm a fresh
 * ASID via drop_mmu_context().
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int this_cpu;

	preempt_disable();
	this_cpu = smp_processor_id();

	/* Only mms that ever ran on this CPU have a live context. */
	if (cpu_context(this_cpu, mm) != 0)
		drop_mmu_context(mm, this_cpu);

	preempt_enable();
}
113
/*
 * Flush the TLB entries covering [start, end) of vma's mm on this
 * CPU.  For small ranges each even/odd page pair is probed and, if
 * present, parked on a unique non-matching EntryHi; for large ranges
 * it is cheaper to drop the mm's ASID altogether.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		ENTER_CRITICAL(flags);
		size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
		/* Each TLB entry maps an even/odd page pair. */
		size = (size + 1) >> 1;
		if (size <= current_cpu_data.tlbsize/2) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			/* Round to double-page (EntryHi VPN2) boundaries. */
			start &= (PAGE_MASK << 1);
			end += ((PAGE_SIZE << 1) - 1);
			end &= (PAGE_MASK << 1);
			while (start < end) {
				int idx;

				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)	/* probe miss: nothing to kill */
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			/* Too many pages: recycle the ASID instead. */
			drop_mmu_context(mm, cpu);
		}
		FLUSH_ITLB;
		EXIT_CRITICAL(flags);
	}
}
160
/*
 * Flush the TLB entries covering the kernel range [start, end) on
 * this CPU.  Kernel mappings carry no ASID, so probing uses the bare
 * address; large ranges fall back to a full flush.
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	/* Each TLB entry maps an even/odd page pair. */
	size = (size + 1) >> 1;
	if (size <= current_cpu_data.tlbsize / 2) {
		int pid = read_c0_entryhi();

		/* Round to double-page (EntryHi VPN2) boundaries. */
		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)	/* probe miss: nothing to kill */
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
201
/*
 * Flush the single TLB entry mapping @page in vma's mm on this CPU.
 * The page is probed with the mm's current ASID; if found, the entry
 * is parked on a unique non-matching EntryHi.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		/* Align to the even/odd double-page VPN2 boundary. */
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)	/* not in the TLB: just restore state */
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		FLUSH_ITLB_VM(vma);
		EXIT_CRITICAL(flags);
	}
}
235
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	/* Align to the even/odd double-page VPN2 boundary. */
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	FLUSH_ITLB;
	EXIT_CRITICAL(flags);
}
266
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 *
 * Refill/refresh the TLB entry for @address in vma's mm after a PTE
 * update: probe for an existing entry, walk the page tables, and
 * write the new EntryLo pair back (indexed if the probe hit, random
 * otherwise).  Handles huge pages when CONFIG_HUGETLB_PAGE is set.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	/* The page-table walk overlaps the probe latency. */
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_HUGETLB_PAGE
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_val(*ptep) >> 6;
		write_c0_entrylo0(lo);
		/* Second half of the huge page goes into EntryLo1. */
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		/* Restore the normal page size for subsequent refills. */
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		/* 64-bit physaddr on 32-bit CPU: PFN lives in pte_high. */
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		/* Even then odd PTE of the double-page pair. */
		write_c0_entrylo0(pte_val(*ptep++) >> 6);
		write_c0_entrylo1(pte_val(*ptep) >> 6);
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	FLUSH_ITLB_VM(vma);
	EXIT_CRITICAL(flags);
}
338
#if 0
/*
 * Disabled variant of the TLB-refresh path kept for the R4k
 * "end of page" hardware bug; compiled out via #if 0.
 */
static void r4k_update_mmu_cache_hwbug(struct vm_area_struct * vma,
				       unsigned long address, pte_t pte)
{
	unsigned long flags;
	unsigned int asid;
	pgd_t *pgdp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx;

	ENTER_CRITICAL(flags);
	address &= (PAGE_MASK << 1);
	asid = read_c0_entryhi() & ASID_MASK;
	write_c0_entryhi(address | asid);
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pmdp = pmd_offset(pgdp, address);
	idx = read_c0_index();
	ptep = pte_offset_map(pmdp, address);
	write_c0_entrylo0(pte_val(*ptep++) >> 6);
	write_c0_entrylo1(pte_val(*ptep) >> 6);
	mtc0_tlbw_hazard();
	if (idx < 0)
		tlb_write_random();
	else
		tlb_write_indexed();
	tlbw_use_hazard();
	EXIT_CRITICAL(flags);
}
#endif
372
/*
 * Install a permanent (wired) TLB entry: bump c0_wired and write the
 * supplied EntryHi/EntryLo pair and page mask at the new wired index.
 * The entry survives local_flush_tlb_all(), which skips wired slots.
 */
void __init add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}
403
/*
 * Used for loading TLB entries before trap_init() has started, when we
 * don't actually want to add a wired entry which remains throughout the
 * lifetime of the system
 */

/* Next free slot for add_temporary_entry(); initialised to the top of
 * the TLB in tlb_init() and consumed growing downwards. */
static int temp_tlb_entry __cpuinitdata;
Linus Torvalds1da177e2005-04-16 15:20:36 -0700411
412__init int add_temporary_entry(unsigned long entrylo0, unsigned long entrylo1,
413 unsigned long entryhi, unsigned long pagemask)
414{
415 int ret = 0;
416 unsigned long flags;
417 unsigned long wired;
418 unsigned long old_pagemask;
419 unsigned long old_ctx;
420
Ralf Baechle41c594a2006-04-05 09:45:45 +0100421 ENTER_CRITICAL(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700422 /* Save old context and create impossible VPN2 value */
423 old_ctx = read_c0_entryhi();
424 old_pagemask = read_c0_pagemask();
425 wired = read_c0_wired();
426 if (--temp_tlb_entry < wired) {
Maciej W. Rozycki30442992005-02-01 23:02:12 +0000427 printk(KERN_WARNING
428 "No TLB space left for add_temporary_entry\n");
Linus Torvalds1da177e2005-04-16 15:20:36 -0700429 ret = -ENOSPC;
430 goto out;
431 }
432
433 write_c0_index(temp_tlb_entry);
434 write_c0_pagemask(pagemask);
435 write_c0_entryhi(entryhi);
436 write_c0_entrylo0(entrylo0);
437 write_c0_entrylo1(entrylo1);
438 mtc0_tlbw_hazard();
439 tlb_write_indexed();
440 tlbw_use_hazard();
441
442 write_c0_entryhi(old_ctx);
443 write_c0_pagemask(old_pagemask);
444out:
Ralf Baechle41c594a2006-04-05 09:45:45 +0100445 EXIT_CRITICAL(flags);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700446 return ret;
447}
448
/*
 * Determine the TLB size for MIPS32/MIPS64-class CPUs from the
 * Config1 MMUSize field and store it in current_cpu_data.tlbsize.
 * Legacy (pre-MIPS32) CPUs were already sized during CPU probing.
 */
static void __cpuinit probe_tlb(unsigned long config)
{
	struct cpuinfo_mips *c = &current_cpu_data;
	unsigned int reg;

	/*
	 * If this isn't a MIPS32 / MIPS64 compliant CPU.  Config 1 register
	 * is not supported, we assume R4k style.  Cpu probing already figured
	 * out the number of tlb entries.
	 */
	if ((c->processor_id & 0xff0000) == PRID_COMP_LEGACY)
		return;
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * If TLB is shared in SMTC system, total size already
	 * has been calculated and written into cpu_data tlbsize
	 */
	if((smtc_status & SMTC_TLB_SHARED) == SMTC_TLB_SHARED)
		return;
#endif /* CONFIG_MIPS_MT_SMTC */

	reg = read_c0_config1();
	/*
	 * NOTE(review): the TLB-presence test below inspects "config"
	 * (the Config register MT field), not the freshly read Config1
	 * value in "reg" -- verify this is intentional.
	 */
	if (!((config >> 7) & 3))
		panic("No TLB present");

	/* Config1 MMUSize field holds (number of TLB entries - 1). */
	c->tlbsize = ((reg >> 25) & 0x3f) + 1;
}
476
/* Requested TLB size from the "ntlb=" command line option; 0 = unset. */
static int __cpuinitdata ntlb = 0;

/* Parse "ntlb=<n>" from the kernel command line. */
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;
}

__setup("ntlb=", set_ntlb);
/*
 * Per-CPU TLB bring-up: size the TLB, reset page mask and wired
 * count, flush everything left by firmware, honour any "ntlb="
 * restriction, and install the TLB refill handler.
 */
void __cpuinit tlb_init(void)
{
	unsigned int config = read_c0_config();

	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	probe_tlb(config);
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);
	/* Temporary entries are handed out from the top slot downwards. */
	temp_tlb_entry = current_cpu_data.tlbsize - 1;

	/* From this point on the ARC firmware is dead.	 */
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */

	/* Honour an "ntlb=" command-line restriction by wiring off the
	 * surplus entries at the bottom of the TLB. */
	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}