/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Copyright (C) 1996 David S. Miller (davem@davemloft.net)
 * Copyright (C) 1997, 1998, 1999, 2000 Ralf Baechle ralf@gnu.org
 * Carsten Langgaard, carstenl@mips.com
 * Copyright (C) 2002 MIPS Technologies, Inc.  All rights reserved.
 */
#include <linux/init.h>
#include <linux/sched.h>
#include <linux/smp.h>
#include <linux/mm.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/cpu.h>
#include <asm/cpu-type.h>
#include <asm/bootinfo.h>
#include <asm/mmu_context.h>
#include <asm/pgtable.h>
#include <asm/tlbmisc.h>

extern void build_tlb_refill_handler(void);

/*
 * Make sure all entries differ.  If they're not different
 * MIPS32 will take revenge ...
 */
#define UNIQUE_ENTRYHI(idx) (CKSEG0 + ((idx) << (PAGE_SHIFT + 1)))
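
/*
 * Each index is mapped to its own dummy VPN2 in CKSEG0.  CKSEG0 is
 * unmapped space, so these entries can never be hit by a translated
 * access, and shifting the index past the double-page VPN2 bits keeps
 * every entry distinct, avoiding the duplicate-match machine check
 * alluded to above.
 */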

/* Atomicity and interruptibility */
#ifdef CONFIG_MIPS_MT_SMTC

#include <asm/smtc.h>
#include <asm/mipsmtregs.h>

#define ENTER_CRITICAL(flags) \
        { \
        unsigned int mvpflags; \
        local_irq_save(flags); \
        mvpflags = dvpe()
#define EXIT_CRITICAL(flags) \
        evpe(mvpflags); \
        local_irq_restore(flags); \
        }
#else

#define ENTER_CRITICAL(flags) local_irq_save(flags)
#define EXIT_CRITICAL(flags) local_irq_restore(flags)

#endif /* CONFIG_MIPS_MT_SMTC */
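
/*
 * Note: in the SMTC variant the dvpe()/evpe() pair is also meant to
 * stop execution on the other VPEs sharing this TLB, so the probe and
 * write sequences below should be atomic core-wide, not merely
 * protected against local interrupts.
 */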

/*
 * The LOONGSON2 has a 4-entry ITLB which is a subset of the DTLB;
 * unfortunately, the ITLB is not entirely transparent to software.
 */
static inline void flush_itlb(void)
{
        switch (current_cpu_type()) {
        case CPU_LOONGSON2:
                /* Writing 4 to the c0 diag register invalidates the ITLB */
                write_c0_diag(4);
                break;
        default:
                break;
        }
}

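/*
 * Only executable mappings can leave stale entries in the ITLB, so
 * the flush may be skipped for data-only VMAs.
 */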
static inline void flush_itlb_vm(struct vm_area_struct *vma)
{
        if (vma->vm_flags & VM_EXEC)
                flush_itlb();
}

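/*
 * Flush the whole TLB by pointing every non-wired entry at a unique
 * dummy VPN2.  The walk starts at the c0_wired index, so wired
 * entries deliberately survive the flush.
 */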
void local_flush_tlb_all(void)
{
        unsigned long flags;
        unsigned long old_ctx;
        int entry;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);

        entry = read_c0_wired();

        /* Blast 'em all away. */
        while (entry < current_cpu_data.tlbsize) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(entry));
                write_c0_index(entry);
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                entry++;
        }
        tlbw_use_hazard();
        write_c0_entryhi(old_ctx);
        flush_itlb();
        EXIT_CRITICAL(flags);
}
EXPORT_SYMBOL(local_flush_tlb_all);

/*
 * All entries common to a mm share an ASID.  To effectively flush
 * these entries, we just bump the ASID.
 */
void local_flush_tlb_mm(struct mm_struct *mm)
{
        int cpu;

        preempt_disable();

        cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0)
                drop_mmu_context(mm, cpu);

        preempt_enable();
}

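/*
 * Flush a user address range.  start/end are rounded to an even/odd
 * page pair since one TLB entry maps two consecutive pages.  When the
 * range spans more than half the TLB it is cheaper to invalidate the
 * whole context by dropping the ASID than to probe page by page.
 */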
void local_flush_tlb_range(struct vm_area_struct *vma, unsigned long start,
        unsigned long end)
{
        struct mm_struct *mm = vma->vm_mm;
        int cpu = smp_processor_id();

        if (cpu_context(cpu, mm) != 0) {
                unsigned long size, flags;

                ENTER_CRITICAL(flags);
                start = round_down(start, PAGE_SIZE << 1);
                end = round_up(end, PAGE_SIZE << 1);
                size = (end - start) >> (PAGE_SHIFT + 1);
                if (size <= current_cpu_data.tlbsize / 2) {
                        int oldpid = read_c0_entryhi();
                        int newpid = cpu_asid(cpu, mm);

                        while (start < end) {
                                int idx;

                                write_c0_entryhi(start | newpid);
                                start += (PAGE_SIZE << 1);
                                mtc0_tlbw_hazard();
                                tlb_probe();
                                tlb_probe_hazard();
                                idx = read_c0_index();
                                write_c0_entrylo0(0);
                                write_c0_entrylo1(0);
                                if (idx < 0)
                                        continue;
                                /* Make sure all entries differ. */
                                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                                mtc0_tlbw_hazard();
                                tlb_write_indexed();
                        }
                        tlbw_use_hazard();
                        write_c0_entryhi(oldpid);
                } else {
                        drop_mmu_context(mm, cpu);
                }
                flush_itlb();
                EXIT_CRITICAL(flags);
        }
}

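/*
 * Same idea for kernel addresses.  No ASID is ORed into EntryHi here;
 * kernel mappings are normally created with the global bit set, so
 * the probe matches them regardless of the current ASID.
 */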
void local_flush_tlb_kernel_range(unsigned long start, unsigned long end)
{
        unsigned long size, flags;

        ENTER_CRITICAL(flags);
        size = (end - start + (PAGE_SIZE - 1)) >> PAGE_SHIFT;
        size = (size + 1) >> 1;	/* pages -> double-page TLB entries */
        if (size <= current_cpu_data.tlbsize / 2) {
                int pid = read_c0_entryhi();

                start &= (PAGE_MASK << 1);
                end += ((PAGE_SIZE << 1) - 1);
                end &= (PAGE_MASK << 1);

                while (start < end) {
                        int idx;

                        write_c0_entryhi(start);
                        start += (PAGE_SIZE << 1);
                        mtc0_tlbw_hazard();
                        tlb_probe();
                        tlb_probe_hazard();
                        idx = read_c0_index();
                        write_c0_entrylo0(0);
                        write_c0_entrylo1(0);
                        if (idx < 0)
                                continue;
                        /* Make sure all entries differ. */
                        write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                        mtc0_tlbw_hazard();
                        tlb_write_indexed();
                }
                tlbw_use_hazard();
                write_c0_entryhi(pid);
        } else {
                local_flush_tlb_all();
        }
        flush_itlb();
        EXIT_CRITICAL(flags);
}

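/*
 * Flush a single user page: probe for the double page containing it
 * under the mm's ASID and knock out the matching entry, if any.
 */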
void local_flush_tlb_page(struct vm_area_struct *vma, unsigned long page)
{
        int cpu = smp_processor_id();

        if (cpu_context(cpu, vma->vm_mm) != 0) {
                unsigned long flags;
                int oldpid, newpid, idx;

                newpid = cpu_asid(cpu, vma->vm_mm);
                page &= (PAGE_MASK << 1);
                ENTER_CRITICAL(flags);
                oldpid = read_c0_entryhi();
                write_c0_entryhi(page | newpid);
                mtc0_tlbw_hazard();
                tlb_probe();
                tlb_probe_hazard();
                idx = read_c0_index();
                write_c0_entrylo0(0);
                write_c0_entrylo1(0);
                if (idx < 0)
                        goto finish;
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();

        finish:
                write_c0_entryhi(oldpid);
                flush_itlb_vm(vma);
                EXIT_CRITICAL(flags);
        }
}

/*
 * This one is only used for pages with the global bit set so we don't
 * care much about the ASID.
 */
void local_flush_tlb_one(unsigned long page)
{
        unsigned long flags;
        int oldpid, idx;

        ENTER_CRITICAL(flags);
        oldpid = read_c0_entryhi();
        page &= (PAGE_MASK << 1);
        write_c0_entryhi(page);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        idx = read_c0_index();
        write_c0_entrylo0(0);
        write_c0_entrylo1(0);
        if (idx >= 0) {
                /* Make sure all entries differ. */
                write_c0_entryhi(UNIQUE_ENTRYHI(idx));
                mtc0_tlbw_hazard();
                tlb_write_indexed();
                tlbw_use_hazard();
        }
        write_c0_entryhi(oldpid);
        flush_itlb();
        EXIT_CRITICAL(flags);
}

/*
 * We will need multiple versions of update_mmu_cache(), one that just
 * updates the TLB with the new pte(s), and another which also checks
 * for the R4k "end of page" hardware bug and does what is needed to
 * work around it.
 */
void __update_tlb(struct vm_area_struct *vma, unsigned long address, pte_t pte)
{
        unsigned long flags;
        pgd_t *pgdp;
        pud_t *pudp;
        pmd_t *pmdp;
        pte_t *ptep;
        int idx, pid;

        /*
         * Handle a debugger faulting in pages for the debuggee.
         */
        if (current->active_mm != vma->vm_mm)
                return;

        ENTER_CRITICAL(flags);

        pid = read_c0_entryhi() & ASID_MASK;
        address &= (PAGE_MASK << 1);
        write_c0_entryhi(address | pid);
        pgdp = pgd_offset(vma->vm_mm, address);
        mtc0_tlbw_hazard();
        tlb_probe();
        tlb_probe_hazard();
        pudp = pud_offset(pgdp, address);
        pmdp = pmd_offset(pudp, address);
        idx = read_c0_index();
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        /* This could be a huge page. */
        if (pmd_huge(*pmdp)) {
                unsigned long lo;
                write_c0_pagemask(PM_HUGE_MASK);
                ptep = (pte_t *)pmdp;
                lo = pte_to_entrylo(pte_val(*ptep));
                write_c0_entrylo0(lo);
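                /*
                 * The odd half of the huge page starts HPAGE_SIZE/2
                 * bytes further on.  With the PFN field starting at
                 * bit 6 of EntryLo (EntryLo encodes PA >> 6 in the
                 * traditional layout), that physical offset becomes
                 * HPAGE_SIZE >> 7.
                 */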
                write_c0_entrylo1(lo + (HPAGE_SIZE >> 7));

                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
                tlbw_use_hazard();
                write_c0_pagemask(PM_DEFAULT_MASK);
        } else
#endif
        {
                ptep = pte_offset_map(pmdp, address);

#if defined(CONFIG_64BIT_PHYS_ADDR) && defined(CONFIG_CPU_MIPS32)
                write_c0_entrylo0(ptep->pte_high);
                ptep++;
                write_c0_entrylo1(ptep->pte_high);
#else
                write_c0_entrylo0(pte_to_entrylo(pte_val(*ptep++)));
                write_c0_entrylo1(pte_to_entrylo(pte_val(*ptep)));
#endif
                mtc0_tlbw_hazard();
                if (idx < 0)
                        tlb_write_random();
                else
                        tlb_write_indexed();
        }
        tlbw_use_hazard();
        flush_itlb_vm(vma);
        EXIT_CRITICAL(flags);
}

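/*
 * Install a permanent ("wired") mapping by hand.  Wired entries live
 * below the c0_wired index, where neither tlb_write_random() nor the
 * flush loop in local_flush_tlb_all() will replace them.
 */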
void add_wired_entry(unsigned long entrylo0, unsigned long entrylo1,
        unsigned long entryhi, unsigned long pagemask)
{
        unsigned long flags;
        unsigned long wired;
        unsigned long old_pagemask;
        unsigned long old_ctx;

        ENTER_CRITICAL(flags);
        /* Save old context and create impossible VPN2 value */
        old_ctx = read_c0_entryhi();
        old_pagemask = read_c0_pagemask();
        wired = read_c0_wired();
        write_c0_wired(wired + 1);
        write_c0_index(wired);
        tlbw_use_hazard();	/* What is the hazard here? */
        write_c0_pagemask(pagemask);
        write_c0_entryhi(entryhi);
        write_c0_entrylo0(entrylo0);
        write_c0_entrylo1(entrylo1);
        mtc0_tlbw_hazard();
        tlb_write_indexed();
        tlbw_use_hazard();

        write_c0_entryhi(old_ctx);
        tlbw_use_hazard();	/* What is the hazard here? */
        write_c0_pagemask(old_pagemask);
        local_flush_tlb_all();
        EXIT_CRITICAL(flags);
}

#ifdef CONFIG_TRANSPARENT_HUGEPAGE

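/*
 * Probe for huge page support: c0_pagemask only retains page sizes
 * the hardware actually implements, so writing PM_HUGE_MASK and
 * reading it back reveals whether a huge pagemask can be used.
 */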
int __init has_transparent_hugepage(void)
{
        unsigned int mask;
        unsigned long flags;

        ENTER_CRITICAL(flags);
        write_c0_pagemask(PM_HUGE_MASK);
        back_to_back_c0_hazard();
        mask = read_c0_pagemask();
        write_c0_pagemask(PM_DEFAULT_MASK);

        EXIT_CRITICAL(flags);

        return mask == PM_HUGE_MASK;
}

#endif /* CONFIG_TRANSPARENT_HUGEPAGE */

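/*
 * "ntlb=N" on the kernel command line limits the usable TLB to N
 * entries by wiring down the remainder; see the end of tlb_init().
 */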
static int ntlb;
static int __init set_ntlb(char *str)
{
        get_option(&str, &ntlb);
        return 1;
}

__setup("ntlb=", set_ntlb);

void tlb_init(void)
{
        /*
         * You should never change this register:
         *   - On R4600 1.7 the tlbp never hits for pages smaller than
         *     the value in the c0_pagemask register.
         *   - The entire mm handling assumes the c0_pagemask register to
         *     be set to fixed-size pages.
         */
        write_c0_pagemask(PM_DEFAULT_MASK);
        write_c0_wired(0);
        if (current_cpu_type() == CPU_R10000 ||
            current_cpu_type() == CPU_R12000 ||
            current_cpu_type() == CPU_R14000)
                write_c0_framemask(0);

        if (cpu_has_rixi) {
                /*
                 * Enable the no read, no exec bits, and enable large
                 * physical addresses (ELPA).
                 */
                u32 pg = PG_RIE | PG_XIE;
#ifdef CONFIG_64BIT
                pg |= PG_ELPA;
#endif
                write_c0_pagegrain(pg);
        }

        /* From this point on the ARC firmware is dead. */
        local_flush_tlb_all();

        /* Did I tell you that ARC SUCKS? */

        if (ntlb) {
                if (ntlb > 1 && ntlb <= current_cpu_data.tlbsize) {
                        int wired = current_cpu_data.tlbsize - ntlb;
                        write_c0_wired(wired);
                        write_c0_index(wired - 1);
                        printk("Restricting TLB to %d entries\n", ntlb);
                } else
                        printk("Ignoring invalid argument ntlb=%d\n", ntlb);
        }

        build_tlb_refill_handler();
}