blob: eeaf50f5df2b7fcb909252a655b435a780adfa23 [file] [log] [blame]
Linus Torvalds1da177e2005-04-16 15:20:36 -07001/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
Justin P. Mattock79add622011-04-04 14:15:29 -07006 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
Linus Torvalds1da177e2005-04-16 15:20:36 -07007 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
8 * Carsten Langgaard, carstenl@mips.com
9 * Copyright (C) 2002 MIPS Technologies, Inc. All rights reserved.
10 */
Linus Torvalds1da177e2005-04-16 15:20:36 -070011#include <linux/init.h>
12#include <linux/sched.h>
Ralf Baechle631330f2009-06-19 14:05:26 +010013#include <linux/smp.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070014#include <linux/mm.h>
David Daneyfd062c82009-05-27 17:47:44 -070015#include <linux/hugetlb.h>
Sanjay Lalf2e36562012-11-21 18:34:10 -080016#include <linux/module.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070017
18#include <asm/cpu.h>
Ralf Baechle69f24d12013-09-17 10:25:47 +020019#include <asm/cpu-type.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070020#include <asm/bootinfo.h>
21#include <asm/mmu_context.h>
22#include <asm/pgtable.h>
Markos Chandrasc01905e2013-11-14 16:12:22 +000023#include <asm/tlb.h>
Ralf Baechle3d18c982011-11-28 16:11:28 +000024#include <asm/tlbmisc.h>
Linus Torvalds1da177e2005-04-16 15:20:36 -070025
26extern void build_tlb_refill_handler(void);
27
/* Atomicity and interruptability */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

/*
 * On SMTC, masking interrupts on the local TC is not enough to make a
 * TLB operation atomic: other thread contexts could still issue TLB
 * instructions.  So the critical section additionally disables all
 * virtual processing elements (dvpe) and re-enables them (evpe) on exit.
 *
 * NOTE: the braces are deliberately unbalanced — ENTER_CRITICAL opens a
 * block (to scope 'mvpflags') that EXIT_CRITICAL closes.  The two macros
 * must always be used as a matched pair within the same function scope.
 */
#define ENTER_CRITICAL(flags) \
	{ \
	unsigned int mvpflags; \
	local_irq_save(flags);\
	mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
	evpe(mvpflags); \
	local_irq_restore(flags); \
	}
#else

/* Without SMTC, disabling local interrupts is sufficient. */
#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
49
Fuxin Zhang2a21c732007-06-06 14:52:43 +080050/*
Huacai Chenc579d312014-03-21 18:44:00 +080051 * LOONGSON2/3 has a 4 entry itlb which is a subset of dtlb,
52 * unfortunately, itlb is not totally transparent to software.
Fuxin Zhang2a21c732007-06-06 14:52:43 +080053 */
Ralf Baechle14bd8c02013-09-25 18:21:26 +020054static inline void flush_itlb(void)
55{
56 switch (current_cpu_type()) {
57 case CPU_LOONGSON2:
Huacai Chenc579d312014-03-21 18:44:00 +080058 case CPU_LOONGSON3:
Ralf Baechle14bd8c02013-09-25 18:21:26 +020059 write_c0_diag(4);
60 break;
61 default:
62 break;
63 }
64}
Fuxin Zhang2a21c732007-06-06 14:52:43 +080065
Ralf Baechle14bd8c02013-09-25 18:21:26 +020066static inline void flush_itlb_vm(struct vm_area_struct *vma)
67{
68 if (vma->vm_flags & VM_EXEC)
69 flush_itlb();
70}
Fuxin Zhang2a21c732007-06-06 14:52:43 +080071
/*
 * Flush the entire TLB of the current CPU.
 *
 * On cores with the TLB-invalidate instruction (cpu_has_tlbinv) the VTLB
 * is invalidated with a single tlbinvf and each FTLB set with one more;
 * otherwise every entry from the wired index upward is overwritten with a
 * unique, impossible VPN2 value.  Runs with interrupts (and on SMTC, all
 * VPEs) disabled via ENTER_CRITICAL.
 */
void local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry, ftlbhighset;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Start above the wired entries so they are preserved. */
	entry = read_c0_wired();

	/* Blast 'em all away. */
	if (cpu_has_tlbinv) {
		if (current_cpu_data.tlbsizevtlb) {
			write_c0_index(0);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate VTLB */
		}
		/* FTLB sets are indexed after the VTLB entries. */
		ftlbhighset = current_cpu_data.tlbsizevtlb +
			current_cpu_data.tlbsizeftlbsets;
		for (entry = current_cpu_data.tlbsizevtlb;
		     entry < ftlbhighset;
		     entry++) {
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlbinvf();  /* invalidate one FTLB set */
		}
	} else {
		while (entry < current_cpu_data.tlbsize) {
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(entry));
			write_c0_index(entry);
			mtc0_tlbw_hazard();
			tlb_write_indexed();
			entry++;
		}
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	flush_itlb();
	EXIT_CRITICAL(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);
Linus Torvalds1da177e2005-04-16 15:20:36 -0700118
/*
 * All entries common to a mm share an ASID, so "flushing" the mm's
 * entries just means retiring that ASID: drop the context and a fresh
 * ASID is assigned on the next activation.  Preemption is disabled so
 * the cpu id and its context stay consistent.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
	int cpu;

	preempt_disable();
	cpu = smp_processor_id();
	if (cpu_context(cpu, mm))
		drop_mmu_context(mm, cpu);
	preempt_enable();
}
135
/*
 * Flush all TLB entries for [start, end) belonging to vma's mm on this
 * CPU.  Does nothing if the mm has no live context here.  For small
 * ranges each even/odd page pair is probed and, if present, overwritten
 * with a unique impossible VPN2; for large ranges it is cheaper to drop
 * the whole context instead.
 */
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
	unsigned long end)
{
	struct mm_struct *mm = vma->vm_mm;
	int cpu = smp_processor_id();

	if (cpu_context(cpu, mm) != 0) {
		unsigned long size, flags;

		ENTER_CRITICAL(flags);
		/*
		 * Each TLB entry maps an even/odd page pair, so align the
		 * range to double-page boundaries and count pairs.
		 */
		start = round_down(start, PAGE_SIZE << 1);
		end = round_up(end, PAGE_SIZE << 1);
		size = (end - start) >> (PAGE_SHIFT + 1);
		/*
		 * Threshold: with an FTLB present, probing is only worth it
		 * for up to 1/8 of the TLB; otherwise up to half.
		 */
		if (size <= (current_cpu_data.tlbsizeftlbsets ?
			     current_cpu_data.tlbsize / 8 :
			     current_cpu_data.tlbsize / 2)) {
			int oldpid = read_c0_entryhi();
			int newpid = cpu_asid(cpu, mm);

			while (start < end) {
				int idx;

				/* Probe for this VPN2 under the mm's ASID. */
				write_c0_entryhi(start | newpid);
				start += (PAGE_SIZE << 1);
				mtc0_tlbw_hazard();
				tlb_probe();
				tlb_probe_hazard();
				idx = read_c0_index();
				write_c0_entrylo0(0);
				write_c0_entrylo1(0);
				if (idx < 0)	/* not in the TLB: nothing to kill */
					continue;
				/* Make sure all entries differ. */
				write_c0_entryhi(UNIQUE_ENTRYHI(idx));
				mtc0_tlbw_hazard();
				tlb_write_indexed();
			}
			tlbw_use_hazard();
			write_c0_entryhi(oldpid);
		} else {
			/* Range too large: cheaper to retire the ASID. */
			drop_mmu_context(mm, cpu);
		}
		flush_itlb();
		EXIT_CRITICAL(flags);
	}
}
182
/*
 * Flush kernel-range TLB entries for [start, end) on this CPU.  Kernel
 * entries are global, so no ASID is ORed into the probe address.  Small
 * ranges are probed pair-by-pair; large ranges fall back to a full
 * local_flush_tlb_all().
 */
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
	unsigned long size, flags;

	ENTER_CRITICAL(flags);
	/* Count double-pages: one TLB entry covers an even/odd page pair. */
	size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
	size = (size + 1) >> 1;
	/* Same probe-vs-flush-all threshold as local_flush_tlb_range(). */
	if (size <= (current_cpu_data.tlbsizeftlbsets ?
		     current_cpu_data.tlbsize / 8 :
		     current_cpu_data.tlbsize / 2)) {
		int pid = read_c0_entryhi();

		/* Align the range to double-page boundaries. */
		start &= (PAGE_MASK << 1);
		end += ((PAGE_SIZE << 1) - 1);
		end &= (PAGE_MASK << 1);

		while (start < end) {
			int idx;

			write_c0_entryhi(start);
			start += (PAGE_SIZE << 1);
			mtc0_tlbw_hazard();
			tlb_probe();
			tlb_probe_hazard();
			idx = read_c0_index();
			write_c0_entrylo0(0);
			write_c0_entrylo1(0);
			if (idx < 0)	/* no matching entry */
				continue;
			/* Make sure all entries differ. */
			write_c0_entryhi(UNIQUE_ENTRYHI(idx));
			mtc0_tlbw_hazard();
			tlb_write_indexed();
		}
		tlbw_use_hazard();
		write_c0_entryhi(pid);
	} else {
		local_flush_tlb_all();
	}
	flush_itlb();
	EXIT_CRITICAL(flags);
}
225
/*
 * Flush the single TLB entry (if any) that maps 'page' in vma's mm on
 * this CPU.  Probes the TLB with the mm's ASID; on a hit the entry is
 * overwritten with a unique impossible VPN2.  No-op if the mm has no
 * context on this CPU.
 */
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
	int cpu = smp_processor_id();

	if (cpu_context(cpu, vma->vm_mm) != 0) {
		unsigned long flags;
		int oldpid, newpid, idx;

		newpid = cpu_asid(cpu, vma->vm_mm);
		/* Align to the even/odd double-page the entry covers. */
		page &= (PAGE_MASK << 1);
		ENTER_CRITICAL(flags);
		oldpid = read_c0_entryhi();
		write_c0_entryhi(page | newpid);
		mtc0_tlbw_hazard();
		tlb_probe();
		tlb_probe_hazard();
		idx = read_c0_index();
		write_c0_entrylo0(0);
		write_c0_entrylo1(0);
		if (idx < 0)	/* not present: just restore EntryHi */
			goto finish;
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();

	finish:
		write_c0_entryhi(oldpid);
		flush_itlb_vm(vma);
		EXIT_CRITICAL(flags);
	}
}
259
/*
 * This one is only used for pages with the global bit set so we don't care
 * much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
	unsigned long flags;
	int oldpid, idx;

	ENTER_CRITICAL(flags);
	oldpid = read_c0_entryhi();
	/* Align to the even/odd double-page boundary before probing. */
	page &= (PAGE_MASK << 1);
	write_c0_entryhi(page);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);
	if (idx >= 0) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(idx));
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		tlbw_use_hazard();
	}
	write_c0_entryhi(oldpid);
	flush_itlb();
	EXIT_CRITICAL(flags);
}
290
/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does the needy.
 */
/*
 * Refill/refresh the TLB entry covering 'address' in vma's mm from the
 * current page tables.  Probes for an existing entry and either updates
 * it in place (tlb_write_indexed) or writes a new one at a random index.
 * Handles both huge-page PMDs and normal PTE pairs.
 */
void __update_tlb(struct vm_area_struct * vma, unsigned long address, pte_t pte)
{
	unsigned long flags;
	pgd_t *pgdp;
	pud_t *pudp;
	pmd_t *pmdp;
	pte_t *ptep;
	int idx, pid;

	/*
	 * Handle debugger faulting in for debugee.
	 */
	if (current->active_mm != vma->vm_mm)
		return;

	ENTER_CRITICAL(flags);

	pid = read_c0_entryhi() & ASID_MASK;
	/* One entry maps an even/odd page pair: align the VPN2. */
	address &= (PAGE_MASK << 1);
	write_c0_entryhi(address | pid);
	/* Walk the page tables while the probe is in flight. */
	pgdp = pgd_offset(vma->vm_mm, address);
	mtc0_tlbw_hazard();
	tlb_probe();
	tlb_probe_hazard();
	pudp = pud_offset(pgdp, address);
	pmdp = pmd_offset(pudp, address);
	idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	/* this could be a huge page  */
	if (pmd_huge(*pmdp)) {
		unsigned long lo;
		write_c0_pagemask(PM_HUGE_MASK);
		ptep = (pte_t *)pmdp;
		lo = pte_to_entrylo(pte_val(*ptep));
		write_c0_entrylo0(lo);
		/* Second half of the huge page: offset by HPAGE_SIZE >> 7
		   (EntryLo PFN field starts at bit 6). */
		write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
		tlbw_use_hazard();
		/* Restore normal page size for subsequent operations. */
		write_c0_pagemask(PM_DEFAULT_MASK);
	} else
#endif
	{
		ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
		/* 64-bit PTEs on a 32-bit CPU: EntryLo comes from pte_high. */
		write_c0_entrylo0(ptep->pte_high);
		ptep++;
		write_c0_entrylo1(ptep->pte_high);
#else
		/* Write both PTEs of the even/odd pair. */
		write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
		write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
		mtc0_tlbw_hazard();
		if (idx < 0)
			tlb_write_random();
		else
			tlb_write_indexed();
	}
	tlbw_use_hazard();
	flush_itlb_vm(vma);
	EXIT_CRITICAL(flags);
}
363
/*
 * Install a permanent ("wired") TLB entry with the given EntryLo pair,
 * EntryHi and page mask.  Increments the wired index so the entry is
 * never replaced by tlb_write_random, then flushes the rest of the TLB.
 */
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
	unsigned long entryhi, unsigned long pagemask)
{
	unsigned long flags;
	unsigned long wired;
	unsigned long old_pagemask;
	unsigned long old_ctx;

	ENTER_CRITICAL(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();
	wired = read_c0_wired();
	/* Claim the next index as wired and write the entry there. */
	write_c0_wired(wired + 1);
	write_c0_index(wired);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(pagemask);
	write_c0_entryhi(entryhi);
	write_c0_entrylo0(entrylo0);
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	tlbw_use_hazard();

	write_c0_entryhi(old_ctx);
	tlbw_use_hazard();	/* What is the hazard here? */
	write_c0_pagemask(old_pagemask);
	local_flush_tlb_all();
	EXIT_CRITICAL(flags);
}
394
#ifdef CONFIG_TRANSPARENT_HUGEPAGE

/*
 * Detect huge-page TLB support by writing the huge-page mask to
 * c0_pagemask and reading it back: hardware that lacks support will
 * not latch the value.  The default mask is restored afterwards.
 * Returns nonzero when huge pages are usable.
 */
int __init has_transparent_hugepage(void)
{
	unsigned int mask;
	unsigned long flags;

	ENTER_CRITICAL(flags);
	write_c0_pagemask(PM_HUGE_MASK);
	back_to_back_c0_hazard();
	mask = read_c0_pagemask();
	write_c0_pagemask(PM_DEFAULT_MASK);

	EXIT_CRITICAL(flags);

	return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */
414
/* Optional "ntlb=" boot parameter: restrict the usable TLB to this many
   entries (consumed by tlb_init below); 0 means no restriction. */
static int ntlb;
static int __init set_ntlb(char *str)
{
	get_option(&str, &ntlb);
	return 1;	/* 1 == parameter handled */
}

__setup("ntlb=", set_ntlb);
423
/*
 * Per-CPU TLB bring-up: set the fixed page mask, clear wired entries,
 * enable RI/XI page protection where available, flush everything the
 * firmware left behind, honour the "ntlb=" restriction and install the
 * TLB refill handler.
 */
void tlb_init(void)
{
	/*
	 * You should never change this register:
	 *   - On R4600 1.7 the tlbp never hits for pages smaller than
	 *     the value in the c0_pagemask register.
	 *   - The entire mm handling assumes the c0_pagemask register to
	 *     be set to fixed-size pages.
	 */
	write_c0_pagemask(PM_DEFAULT_MASK);
	write_c0_wired(0);
	if (current_cpu_type() == CPU_R10000 ||
	    current_cpu_type() == CPU_R12000 ||
	    current_cpu_type() == CPU_R14000)
		write_c0_framemask(0);

	if (cpu_has_rixi) {
		/*
		 * Enable the no read, no exec bits, and enable large virtual
		 * address.
		 */
		u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
		pg |= PG_ELPA;
#endif
		write_c0_pagegrain(pg);
	}

	/* From this point on the ARC firmware is dead.	*/
	local_flush_tlb_all();

	/* Did I tell you that ARC SUCKS?  */

	if (ntlb) {
		if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
			/* Wire off the top entries so only ntlb remain usable. */
			int wired = current_cpu_data.tlbsize - ntlb;
			write_c0_wired(wired);
			write_c0_index(wired-1);
			printk("Restricting TLB to %d entries\n", ntlb);
		} else
			printk("Ignoring invalid argument ntlb=%d\n", ntlb);
	}

	build_tlb_refill_handler();
}