/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 * Copyright (C) 2011  MIPS Technologies, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>
#include <linux/cache.h>

#include <asm/cacheflush.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
#include <asm/setup.h>

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

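/*
 * The working registers handed to the synthesized fastpath.  As
 * build_get_work_registers() below shows, these are either k0/k1/$1 (when
 * $1 can be parked in a KScratch register) or k1/$1/$2 (when $1 and $2
 * must be spilled to handler_reg_save).
 */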
struct work_registers {
	int r1;
	int r2;
	int r3;
};

struct tlb_reg_save {
	unsigned long a;
	unsigned long b;
} ____cacheline_aligned_in_smp;

static struct tlb_reg_save handler_reg_save[NR_CPUS];

static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

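/*
 * bbit0/bbit1 (branch on bit) are Cavium Octeon ISA extensions, and the
 * indexed load lwx is implemented on Octeon2; the two helpers below gate
 * their use by CPU type.
 */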
static int use_bbit_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON:
	case CPU_CAVIUM_OCTEON_PLUS:
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}

static int use_lwx_insns(void)
{
	switch (current_cpu_type()) {
	case CPU_CAVIUM_OCTEON2:
		return 1;
	default:
		return 0;
	}
}
#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
	return true;
}
static int scratchpad_offset(int i)
{
	/*
	 * CVMSEG starts at address -32768 and extends for
	 * CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE 128-byte cache lines.
	 */
	i += 1; /* Kernel use starts at the top and works down. */
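	/*
	 * Worked example (illustrative): with CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
	 * == 2 the region spans [-32768, -32512); i == 0 then yields
	 * 2*128 - 8*1 - 32768 = -32520, the topmost 8-byte slot, and each
	 * successive i moves one slot further down.
	 */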
	return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
#else
static bool scratchpad_available(void)
{
	return false;
}
static int scratchpad_offset(int i)
{
	BUG();
	/* Really unreachable, but evidently some GCC versions want this. */
	return 0;
}
#endif
/*
 * Found by experiment: At least some revisions of the 4kc throw a machine
 * check exception under some circumstances, triggered by invalid values in
 * the index register.  Delaying the tlbp instruction until after the next
 * branch, plus adding an additional nop in front of tlbwi/tlbwr, avoids the
 * invalid index register values.  Nobody knows why; it's not an issue
 * caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard_0,
	label_split = label_tlbw_hazard_0 + 8,
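	/*
	 * The "+ 8" reserves eight ids (label_tlbw_hazard_0 .. +7) for
	 * uasm_bgezl_hazard()/uasm_bgezl_label() below.
	 */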
	label_tlbl_goaround1,
	label_tlbl_goaround2,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
	label_large_segbits_fault,
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
/* _tlbw_hazard_x is handled differently. */
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
UASM_L_LA(_tlb_huge_update)
#endif

static int __cpuinitdata hazard_instance;

static void __cpuinit uasm_bgezl_hazard(u32 **p,
					struct uasm_reloc **r,
					int instance)
{
	switch (instance) {
	case 0 ... 7:
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
		return;
	default:
		BUG();
	}
}

static void __cpuinit uasm_bgezl_label(struct uasm_label **l,
				       u32 **p,
				       int instance)
{
	switch (instance) {
	case 0 ... 7:
		uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
		break;
	default:
		BUG();
	}
}

/*
 * pgtable bits are assigned dynamically depending on processor features
 * and statically based on kernel configuration.  This spits out the actual
 * values the kernel is using, which is required to make sense of
 * disassembled TLB exception handlers.
 */
static void output_pgtable_bits_defines(void)
{
#define pr_define(fmt, ...)					\
	pr_debug("#define " fmt, ##__VA_ARGS__)

	pr_debug("#include <asm/asm.h>\n");
	pr_debug("#include <asm/regdef.h>\n");
	pr_debug("\n");

	pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
	pr_define("_PAGE_READ_SHIFT %d\n", _PAGE_READ_SHIFT);
	pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
	pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
	pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
	pr_define("_PAGE_SPLITTING_SHIFT %d\n", _PAGE_SPLITTING_SHIFT);
#endif
	if (cpu_has_rixi) {
#ifdef _PAGE_NO_EXEC_SHIFT
		pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
#endif
#ifdef _PAGE_NO_READ_SHIFT
		pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
#endif
	}
	pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
	pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
	pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
	pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
	pr_debug("\n");
}

static inline void dump_handler(const char *symbol, const u32 *handler, int count)
{
	int i;

	pr_debug("LEAF(%s)\n", symbol);

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);

	pr_debug("\t.set\tpop\n");

	pr_debug("\tEND(%s)\n", symbol);
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif

/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

static int check_for_high_segbits __cpuinitdata;

static unsigned int kscratch_used_mask __cpuinitdata;

static int __cpuinit allocate_kscratch(void)
{
	int r;
	unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;

	r = ffs(a);

	if (r == 0)
		return -1;

	r--; /* make it zero based */

	kscratch_used_mask |= (1 << r);

	return r;
}
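
/*
 * Illustrative walk-through (values assumed, not from the original source):
 * with cpu_data[0].kscratch_mask == 0x0c (KScratch2/KScratch3 implemented),
 * the first call returns 2, the second returns 3, and a third returns -1
 * once kscratch_used_mask has accumulated 0x0c.
 */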

static int scratch_reg __cpuinitdata;
static int pgd_reg __cpuinitdata;
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};

static struct work_registers __cpuinit build_get_work_registers(u32 **p)
{
	struct work_registers r;

	int smp_processor_id_reg;
	int smp_processor_id_sel;
	int smp_processor_id_shift;

	if (scratch_reg > 0) {
		/* Save in CPU local C0_KScratch? */
		UASM_i_MTC0(p, 1, 31, scratch_reg);
		r.r1 = K0;
		r.r2 = K1;
		r.r3 = 1;
		return r;
	}

	if (num_possible_cpus() > 1) {
#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
		smp_processor_id_shift = 51;
		smp_processor_id_reg = 20; /* XContext */
		smp_processor_id_sel = 0;
#else
# ifdef CONFIG_32BIT
		smp_processor_id_shift = 25;
		smp_processor_id_reg = 4; /* Context */
		smp_processor_id_sel = 0;
# endif
# ifdef CONFIG_64BIT
		smp_processor_id_shift = 26;
		smp_processor_id_reg = 4; /* Context */
		smp_processor_id_sel = 0;
# endif
#endif
		/* Get smp_processor_id */
		UASM_i_MFC0(p, K0, smp_processor_id_reg, smp_processor_id_sel);
		UASM_i_SRL_SAFE(p, K0, K0, smp_processor_id_shift);

		/* handler_reg_save index in K0 */
		UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));

		UASM_i_LA(p, K1, (long)&handler_reg_save);
		UASM_i_ADDU(p, K0, K0, K1);
	} else {
		UASM_i_LA(p, K0, (long)&handler_reg_save);
	}
	/* K0 now points to save area, save $1 and $2 */
	UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
	UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);

	r.r1 = K1;
	r.r2 = 1;
	r.r3 = 2;
	return r;
}

static void __cpuinit build_restore_work_registers(u32 **p)
{
	if (scratch_reg > 0) {
		UASM_i_MFC0(p, 1, 31, scratch_reg);
		return;
	}
	/* K0 already points to save area, restore $1 and $2 */
	UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
	UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT

/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies a 64-bit kernel and the absence of
 * pgd_current; we cannot do the R3000 handler under these circumstances.
 *
 * Declare pgd_current here instead of including mmu_context.h to avoid type
 * conflicts for tlbmiss_handler_setup_pgd.
 */
extern unsigned long pgd_current[];

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
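	/*
	 * Our reading: bits 11..2 of Context hold (BadVAddr >> 12) << 2,
	 * so the andi below extracts the pte's byte offset within the
	 * 1024-entry second-level table.
	 */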
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The R4000 TLB handler is much more complicated.  We have two
 * consecutive handler areas with space for 32 instructions each.
 * Since they aren't used at the same time, we can overflow into the
 * other one.  To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;

458 * Hazards
459 *
460 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
461 * 2. A timing hazard exists for the TLBP instruction.
462 *
Ralf Baechle70342282013-01-22 12:59:30 +0100463 * stalling_instruction
464 * TLBP
Linus Torvalds1da177e2005-04-16 15:20:36 -0700465 *
466 * The JTLB is being read for the TLBP throughout the stall generated by the
467 * previous instruction. This is not really correct as the stalling instruction
468 * can modify the address used to access the JTLB. The failure symptom is that
469 * the TLBP instruction will use an address created for the stalling instruction
470 * and not the address held in C0_ENHI and thus report the wrong results.
471 *
472 * The software work-around is to not allow the instruction preceding the TLBP
473 * to stall - make it an NOP or some other instruction guaranteed not to stall.
474 *
Ralf Baechle70342282013-01-22 12:59:30 +0100475 * Errata 2 will not be fixed. This errata is also on the R5000.
Linus Torvalds1da177e2005-04-16 15:20:36 -0700476 *
477 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
478 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too. */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void (*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		/*
		 * The architecture spec says an ehb is required here,
		 * but a number of cores do not have the hazard and
		 * using an ehb causes an expensive pipeline stall.
		 */
		switch (current_cpu_type()) {
		case CPU_M14KC:
		case CPU_74K:
			break;

		default:
			uasm_i_ehb(p);
			break;
		}
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
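		/*
		 * How this works, as we read it: bgezl $0 is always taken,
		 * the tlbw sits in its delay slot and therefore always
		 * executes, and the branch target is the label placed
		 * right after the tlbw.
		 */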
		uasm_bgezl_hazard(p, r, hazard_instance);
		tlbw(p);
		uasm_bgezl_label(l, p, hazard_instance);
		hazard_instance++;
		uasm_i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		break;

	case CPU_R5000:
	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		tlbw(p);
		break;

	case CPU_R4300:
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_PR4450:
	case CPU_XLR:
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_M14KC:
	case CPU_M14KEC:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
	case CPU_BMIPS32:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2:
	case CPU_R5500:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
	case CPU_ALCHEMY:
		tlbw(p);
		break;

	case CPU_RM7000:
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
	case CPU_R5432:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_JZRISC:
		tlbw(p);
		uasm_i_nop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
						  unsigned int reg)
{
	if (cpu_has_rixi) {
		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
	} else {
#ifdef CONFIG_64BIT_PHYS_ADDR
		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
	}
}

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT

static __cpuinit void build_restore_pagemask(u32 **p,
					     struct uasm_reloc **r,
					     unsigned int tmp,
					     enum label_id lid,
					     int restore_scratch)
{
	if (restore_scratch) {
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		} else {
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
			uasm_il_b(p, r, lid);
		}
		if (scratch_reg > 0)
			UASM_i_MFC0(p, 1, 31, scratch_reg);
		else
			UASM_i_LW(p, 1, scratchpad_offset(0), 0);
	} else {
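		/*
		 * Note (our reading): on this path nothing needs to be
		 * restored, so the mtc0 itself is scheduled into the branch
		 * delay slot below; the restore_scratch path above uses the
		 * delay slot to restore $1 instead.
		 */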
		/* Reset default page size */
		if (PM_DEFAULT_MASK >> 16) {
			uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
			uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else if (PM_DEFAULT_MASK) {
			uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, tmp, C0_PAGEMASK);
		} else {
			uasm_il_b(p, r, lid);
			uasm_i_mtc0(p, 0, C0_PAGEMASK);
		}
	}
}

static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
						 enum tlb_write_entry wmode,
						 int restore_scratch)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
}

/*
 * Check if Huge PTE is present; if so, then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		  unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	if (use_bbit_insns()) {
		uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
	} else {
		uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
		uasm_il_bnez(p, r, tmp, lid);
	}
}

static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the
	 * configured huge page size. This is twice the size
	 * of the large TLB entry we intend to use.
	 * A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0
	 * and entrylo1 to cover the contiguous huge PTE
	 * address space.
	 */
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;
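	/*
	 * Our reading of the ">> 7", assuming 4K base pages: EntryLo holds
	 * the PFN starting at bit 6, so half a huge page (HPAGE_SIZE >> 1)
	 * expressed in EntryLo units is (HPAGE_SIZE >> 1 >> 12) << 6 ==
	 * HPAGE_SIZE >> 7.  If that constant fits in 16 bits, a single
	 * addiu suffices (the "small sequence").
	 */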

	/* We can clobber tmp.  It isn't used after this. */
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	build_convert_pte_to_entrylo(p, pte);
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}

static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitly assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses.  Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
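		/*
		 * The shift below discards every VA bit the page tables can
		 * translate: PGDIR_SHIFT plus log2 of the number of pgd
		 * entries (PGD_ORDER + PAGE_SHIFT - 3 for 8-byte entries).
		 * Any non-zero remainder is an out-of-range address.
		 */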
		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	if (pgd_reg != -1) {
		/* pgd is in pgd_reg */
		UASM_i_MFC0(p, ptr, 31, pgd_reg);
	} else {
		/*
		 * &pgd << 11 stored in CONTEXT [23..63].
		 */
		UASM_i_MFC0(p, ptr, C0_CONTEXT);

		/* Clear lower 23 bits of context. */
		uasm_i_dins(p, ptr, 0, 0, 23);

		/* 1 0 1 0 1 << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
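		/*
		 * Our reading of the trick: the ori plants 0b10101 in bits
		 * 10..6 and the 11-bit rotate moves those bits to 63..59,
		 * forming the xkphys cached prefix, while simultaneously
		 * undoing the "<< 11" so the pgd pointer lands back in its
		 * natural position.
		 */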
	}
#elif defined(CONFIG_SMP)
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl_safe(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl_safe(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr,
			enum vmalloc64_mode mode)
{
	long swpd = (long)swapper_pg_dir;
	int single_insn_swpd;
	int did_vmalloc_branch = 0;

	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

	uasm_l_vmalloc(l, *p);

	if (mode != not_refill && check_for_high_segbits) {
		if (single_insn_swpd) {
			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
			did_vmalloc_branch = 1;
			/* fall through */
		} else {
			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
		}
	}
	if (!did_vmalloc_branch) {
		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
			uasm_il_b(p, r, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
		} else {
			UASM_i_LA_mostly(p, ptr, swpd);
			uasm_il_b(p, r, label_vmalloc_done);
			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
	if (mode != not_refill && check_for_high_segbits) {
		uasm_l_large_segbits_fault(l, *p);
		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS)
		 * boundary.
		 *
		 * Ignoring xsseg (assume it is disabled, so it would
		 * generate address errors), the only remaining possibility
		 * is the upper xuseg addresses.  On processors with
		 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these addresses
		 * would have taken an address error.  We try to mimic
		 * that here by taking a load/istream page fault.
		 */
		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(p, ptr);

		if (mode == refill_scratch) {
			if (scratch_reg > 0)
				UASM_i_MFC0(p, 1, 31, scratch_reg);
			else
				UASM_i_LW(p, 1, scratchpad_offset(0), 0);
		} else {
			uasm_i_nop(p);
		}
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
#endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);
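	/*
	 * Illustrative values, assuming 4K pages and 32-bit PTEs
	 * (PAGE_SHIFT == 12, PTE_T_LOG2 == 2): shift == 1 and mask == 0xff8,
	 * so the masked Context value is the byte offset of an even/odd
	 * pte pair within the page table.
	 */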

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}

static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in different cachelines, or when a load instruction (probably
	 * any memory reference) is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		if (cpu_has_rixi) {
			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
		} else {
			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		}
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	if (cpu_has_rixi) {
		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL));
	} else {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		if (r45k_bvahwbug())
			uasm_i_mfc0(p, tmp, C0_INDEX);
	}
	if (r4k_250MHZhwbug())
		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

struct mips_huge_tlb_info {
	int huge_pte;
	int restore_scratch;
};

static struct mips_huge_tlb_info __cpuinit
build_fast_tlb_refill_handler(u32 **p, struct uasm_label **l,
			      struct uasm_reloc **r, unsigned int tmp,
			      unsigned int ptr, int c0_scratch)
{
	struct mips_huge_tlb_info rv;
	unsigned int even, odd;
	int vmalloc_branch_delay_filled = 0;
	const int scratch = 1; /* Our extra working register */

	rv.huge_pte = scratch;
	rv.restore_scratch = 0;

	if (check_for_high_segbits) {
		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, 31, pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		if (c0_scratch >= 0)
			UASM_i_MTC0(p, scratch, 31, c0_scratch);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		uasm_i_dsrl_safe(p, scratch, tmp,
				 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, scratch, label_vmalloc);

		if (pgd_reg == -1) {
			vmalloc_branch_delay_filled = 1;
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);
		}
	} else {
		if (pgd_reg != -1)
			UASM_i_MFC0(p, ptr, 31, pgd_reg);
		else
			UASM_i_MFC0(p, ptr, C0_CONTEXT);

		UASM_i_MFC0(p, tmp, C0_BADVADDR);

		if (c0_scratch >= 0)
			UASM_i_MTC0(p, scratch, 31, c0_scratch);
		else
			UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

		if (pgd_reg == -1)
			/* Clear lower 23 bits of context. */
			uasm_i_dins(p, ptr, 0, 0, 23);

		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}

	if (pgd_reg == -1) {
		vmalloc_branch_delay_filled = 1;
		/* 1 0 1 0 1 << 6  xkphys cached */
		uasm_i_ori(p, ptr, ptr, 0x540);
		uasm_i_drotr(p, ptr, ptr, 11);
	}

#ifdef __PAGETABLE_PMD_FOLDED
#define LOC_PTEP scratch
#else
#define LOC_PTEP ptr
#endif

	if (!vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

	uasm_l_vmalloc_done(l, *p);

	/*
	 *			tmp		ptr
	 * fall-through case =	badvaddr	*pgd_current
	 * vmalloc case	     =	badvaddr	swapper_pg_dir
	 */

	if (vmalloc_branch_delay_filled)
		/* get pgd offset in bytes */
		uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

#ifdef __PAGETABLE_PMD_FOLDED
	GET_CONTEXT(p, tmp); /* get context reg */
#endif
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);

	if (use_lwx_insns()) {
		UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
		uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
	}

#ifndef __PAGETABLE_PMD_FOLDED
	/* get pmd offset in bytes */
	uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
	uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
	GET_CONTEXT(p, tmp); /* get context reg */

	if (use_lwx_insns()) {
		UASM_i_LWX(p, scratch, scratch, ptr);
	} else {
		uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
		UASM_i_LW(p, scratch, 0, ptr);
	}
#endif
	/* Adjust the context during the load latency. */
	build_adjust_context(p, tmp);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
	/*
	 * In the LWX case we don't want to do the load in the
	 * delay slot.  It cannot issue in the same cycle and may
	 * be speculative and unneeded.
	 */
	if (use_lwx_insns())
		uasm_i_nop(p);
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */


	/* build_update_entries */
	if (use_lwx_insns()) {
		even = ptr;
		odd = tmp;
		UASM_i_LWX(p, even, scratch, tmp);
		UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
		UASM_i_LWX(p, odd, scratch, tmp);
	} else {
		UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
		even = tmp;
		odd = ptr;
		UASM_i_LW(p, even, 0, ptr); /* get even pte */
		UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
	}
	if (cpu_has_rixi) {
		uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
		uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
	} else {
		uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
		UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
		uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
	}
	UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */

	if (c0_scratch >= 0) {
		UASM_i_MFC0(p, scratch, 31, c0_scratch);
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		rv.restore_scratch = 1;
	} else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
	} else {
		UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
		build_tlb_write_entry(p, l, r, tlb_random);
		uasm_l_leave(l, *p);
		rv.restore_scratch = 1;
	}

	uasm_i_eret(p); /* return from trap */

	return rv;
}

/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32

static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;
	struct mips_huge_tlb_info htlb_info __maybe_unused;
	enum vmalloc64_mode vmalloc_mode __maybe_unused;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	if ((scratch_reg > 0 || scratchpad_available()) && use_bbit_insns()) {
		htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
							  scratch_reg);
		vmalloc_mode = refill_scratch;
	} else {
		htlb_info.huge_pte = K0;
		htlb_info.restore_scratch = 0;
		vmalloc_mode = refill_noscratch;
		/*
		 * create the plain linear handler
		 */
		if (bcm1250_m3_war()) {
			unsigned int segbits = 44;

			uasm_i_dmfc0(&p, K0, C0_BADVADDR);
			uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
			uasm_i_xor(&p, K0, K0, K1);
			uasm_i_dsrl_safe(&p, K1, K0, 62);
			uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
			uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
			uasm_i_or(&p, K0, K0, K1);
			uasm_il_bnez(&p, &r, K0, label_leave);
			/* No need for uasm_i_nop */
		}

#ifdef CONFIG_64BIT
		build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
		build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
		build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

		build_get_ptep(&p, K0, K1);
		build_update_entries(&p, K0, K1);
		build_tlb_write_entry(&p, &l, &r, tlb_random);
		uasm_l_leave(&l, p);
		uasm_i_eret(&p); /* return from trap */
	}
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
	uasm_l_tlb_huge_update(&l, p);
	build_huge_update_entries(&p, htlb_info.huge_pte, K1);
	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
				   htlb_info.restore_scratch);
#endif

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
#endif

1341 /*
1342 * Overflow check: For the 64bit handler, we need at least one
1343 * free instruction slot for the wrap-around branch. In worst
1344 * case, if the intended insertion point is a delay slot, we
Matt LaPlante4b3f6862006-10-03 22:21:02 +02001345 * need three, with the second nop'ed and the third being
Linus Torvalds1da177e2005-04-16 15:20:36 -07001346 * unused.
1347 */
Fuxin Zhang2a21c732007-06-06 14:52:43 +08001348 /* Loongson2 ebase is different than r4k, we have more space */
1349#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 if ((p - tlb_handler) > 64)
1351 panic("TLB refill handler space exceeded");
1352#else
David Daneye6f72d32009-05-20 11:40:58 -07001353 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1354 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1355 && uasm_insn_has_bdelay(relocs,
1356 tlb_handler + MIPS64_REFILL_INSNS - 3)))
Linus Torvalds1da177e2005-04-16 15:20:36 -07001357 panic("TLB refill handler space exceeded");
1358#endif
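	/*
	 * Concretely (assuming MIPS64_REFILL_INSNS is 32, i.e. half of
	 * the 64-instruction exception slot): more than 63 instructions
	 * always overflows, and more than 61 already overflows when the
	 * instruction at tlb_handler + MIPS64_REFILL_INSNS - 3 sits in
	 * a branch delay slot.
	 */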
1359
1360 /*
 1361	 * Now fold the handler into the TLB refill handler space.
1362 */
Fuxin Zhang2a21c732007-06-06 14:52:43 +08001363#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001364 f = final_handler;
1365 /* Simplest case, just copy the handler. */
Thiemo Seufere30ec452008-01-28 20:05:38 +00001366 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001367 final_len = p - tlb_handler;
Ralf Baechle875d43e2005-09-03 15:56:16 -07001368#else /* CONFIG_64BIT */
David Daneye6f72d32009-05-20 11:40:58 -07001369 f = final_handler + MIPS64_REFILL_INSNS;
1370 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07001371 /* Just copy the handler. */
Thiemo Seufere30ec452008-01-28 20:05:38 +00001372 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001373 final_len = p - tlb_handler;
1374 } else {
David Daneyaa1762f2012-10-17 00:48:10 +02001375#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daneyfd062c82009-05-27 17:47:44 -07001376 const enum label_id ls = label_tlb_huge_update;
David Daney95affdd2009-05-20 11:40:59 -07001377#else
1378 const enum label_id ls = label_vmalloc;
1379#endif
1380 u32 *split;
1381 int ov = 0;
1382 int i;
1383
1384 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1385 ;
1386 BUG_ON(i == ARRAY_SIZE(labels));
1387 split = labels[i].addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001388
1389 /*
David Daney95affdd2009-05-20 11:40:59 -07001390		 * See if we have overflowed one way or the other.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001391 */
David Daney95affdd2009-05-20 11:40:59 -07001392 if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1393 split < p - MIPS64_REFILL_INSNS)
1394 ov = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
David Daney95affdd2009-05-20 11:40:59 -07001396 if (ov) {
1397 /*
1398 * Split two instructions before the end. One
1399 * for the branch and one for the instruction
1400 * in the delay slot.
1401 */
1402 split = tlb_handler + MIPS64_REFILL_INSNS - 2;
1403
1404 /*
1405 * If the branch would fall in a delay slot,
1406 * we must back up an additional instruction
1407 * so that it is no longer in a delay slot.
1408 */
1409 if (uasm_insn_has_bdelay(relocs, split - 1))
1410 split--;
1411 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001412 /* Copy first part of the handler. */
Thiemo Seufere30ec452008-01-28 20:05:38 +00001413 uasm_copy_handler(relocs, labels, tlb_handler, split, f);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001414 f += split - tlb_handler;
1415
David Daney95affdd2009-05-20 11:40:59 -07001416 if (ov) {
1417 /* Insert branch. */
1418 uasm_l_split(&l, final_handler);
1419 uasm_il_b(&f, &r, label_split);
1420 if (uasm_insn_has_bdelay(relocs, split))
1421 uasm_i_nop(&f);
1422 else {
1423 uasm_copy_handler(relocs, labels,
1424 split, split + 1, f);
1425 uasm_move_labels(labels, f, f + 1, -1);
1426 f++;
1427 split++;
1428 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001429 }
1430
1431 /* Copy the rest of the handler. */
Thiemo Seufere30ec452008-01-28 20:05:38 +00001432 uasm_copy_handler(relocs, labels, split, p, final_handler);
David Daneye6f72d32009-05-20 11:40:58 -07001433 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1434 (p - split);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001435 }
Ralf Baechle875d43e2005-09-03 15:56:16 -07001436#endif /* CONFIG_64BIT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001437
Thiemo Seufere30ec452008-01-28 20:05:38 +00001438 uasm_resolve_relocs(relocs, labels);
1439 pr_debug("Wrote TLB refill handler (%u instructions).\n",
1440 final_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001441
Ralf Baechle91b05e62006-03-29 18:53:00 +01001442 memcpy((void *)ebase, final_handler, 0x100);
Franck Bui-Huu92b1e6a2007-10-18 09:11:17 +02001443
Ralf Baechlea2c763e2012-10-16 22:20:26 +02001444 dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445}
1446
1447/*
Linus Torvalds1da177e2005-04-16 15:20:36 -07001448 * 128 instructions for the fastpath handler is generous and should
1449 * never be exceeded.
1450 */
1451#define FASTPATH_SIZE 128
1452
Franck Bui-Huucbdbe072007-10-18 09:11:16 +02001453u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
1454u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
1455u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;
David Daney3d8bfdd2010-12-21 14:19:11 -08001456#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
Ralf Baechle0bfbf6a2013-03-21 11:28:10 +01001457u32 tlbmiss_handler_setup_pgd_array[16] __cacheline_aligned;
David Daney3d8bfdd2010-12-21 14:19:11 -08001458
1459static void __cpuinit build_r4000_setup_pgd(void)
1460{
1461 const int a0 = 4;
1462 const int a1 = 5;
Ralf Baechle0bfbf6a2013-03-21 11:28:10 +01001463 u32 *p = tlbmiss_handler_setup_pgd_array;
David Daney3d8bfdd2010-12-21 14:19:11 -08001464 struct uasm_label *l = labels;
1465 struct uasm_reloc *r = relocs;
1466
Ralf Baechle0bfbf6a2013-03-21 11:28:10 +01001467 memset(tlbmiss_handler_setup_pgd_array, 0, sizeof(tlbmiss_handler_setup_pgd_array));
David Daney3d8bfdd2010-12-21 14:19:11 -08001468 memset(labels, 0, sizeof(labels));
1469 memset(relocs, 0, sizeof(relocs));
1470
1471 pgd_reg = allocate_kscratch();
1472
1473 if (pgd_reg == -1) {
1474 /* PGD << 11 in c0_Context */
1475 /*
1476 * If it is a ckseg0 address, convert to a physical
1477 * address. Shifting right by 29 and adding 4 will
1478 * result in zero for these addresses.
1479 *
1480 */
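		/*
		 * Worked example (illustrative): for a CKSEG0 pgd such
		 * as 0xffffffff81234000, the arithmetic shift right by
		 * 29 yields -4 and the addiu of 4 yields 0, so the
		 * branch falls through and dinsm clears bits 63..29,
		 * leaving the physical address 0x01234000. Any other
		 * address produces a nonzero result and branches around
		 * the conversion.
		 */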
1481 UASM_i_SRA(&p, a1, a0, 29);
1482 UASM_i_ADDIU(&p, a1, a1, 4);
1483 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
1484 uasm_i_nop(&p);
1485 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1486 uasm_l_tlbl_goaround1(&l, p);
1487 UASM_i_SLL(&p, a0, a0, 11);
1488 uasm_i_jr(&p, 31);
1489 UASM_i_MTC0(&p, a0, C0_CONTEXT);
1490 } else {
1491 /* PGD in c0_KScratch */
1492 uasm_i_jr(&p, 31);
1493 UASM_i_MTC0(&p, a0, 31, pgd_reg);
1494 }
Ralf Baechle0bfbf6a2013-03-21 11:28:10 +01001495 if (p - tlbmiss_handler_setup_pgd_array > ARRAY_SIZE(tlbmiss_handler_setup_pgd_array))
1496 panic("tlbmiss_handler_setup_pgd_array space exceeded");
David Daney3d8bfdd2010-12-21 14:19:11 -08001497 uasm_resolve_relocs(relocs, labels);
Ralf Baechle0bfbf6a2013-03-21 11:28:10 +01001498 pr_debug("Wrote tlbmiss_handler_setup_pgd_array (%u instructions).\n",
1499 (unsigned int)(p - tlbmiss_handler_setup_pgd_array));
David Daney3d8bfdd2010-12-21 14:19:11 -08001500
Ralf Baechlea2c763e2012-10-16 22:20:26 +02001501	dump_handler("tlbmiss_handler_setup_pgd",
Ralf Baechle0bfbf6a2013-03-21 11:28:10 +01001502 tlbmiss_handler_setup_pgd_array,
1503 ARRAY_SIZE(tlbmiss_handler_setup_pgd_array));
David Daney3d8bfdd2010-12-21 14:19:11 -08001504}
1505#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07001506
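/*
 * Load a PTE. On SMP a load-linked is used so that the matching
 * store-conditional in iPTE_SW() can detect a racing update of the
 * page table entry.
 */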
Ralf Baechle234fcd12008-03-08 09:56:28 +00001507static void __cpuinit
David Daneybd1437e2009-05-08 15:10:50 -07001508iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001509{
1510#ifdef CONFIG_SMP
1511# ifdef CONFIG_64BIT_PHYS_ADDR
1512 if (cpu_has_64bits)
Thiemo Seufere30ec452008-01-28 20:05:38 +00001513 uasm_i_lld(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001514 else
1515# endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00001516 UASM_i_LL(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001517#else
1518# ifdef CONFIG_64BIT_PHYS_ADDR
1519 if (cpu_has_64bits)
Thiemo Seufere30ec452008-01-28 20:05:38 +00001520 uasm_i_ld(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001521 else
1522# endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00001523 UASM_i_LW(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001524#endif
1525}
1526
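/*
 * Store the modified PTE back. On SMP this is the store-conditional
 * half of the LL/SC sequence started in iPTE_LW(); on failure we
 * branch back to label_smp_pgtable_change and retry. With 64-bit PTEs
 * on a 32-bit CPU the second word, which holds the hardware
 * _PAGE_VALID/_PAGE_DIRTY bits, is updated separately.
 */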
Ralf Baechle234fcd12008-03-08 09:56:28 +00001527static void __cpuinit
Thiemo Seufere30ec452008-01-28 20:05:38 +00001528iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
Thiemo Seufer63b2d2f2005-04-28 08:52:57 +00001529 unsigned int mode)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001530{
Thiemo Seufer63b2d2f2005-04-28 08:52:57 +00001531#ifdef CONFIG_64BIT_PHYS_ADDR
1532 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
1533#endif
1534
Thiemo Seufere30ec452008-01-28 20:05:38 +00001535 uasm_i_ori(p, pte, pte, mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001536#ifdef CONFIG_SMP
1537# ifdef CONFIG_64BIT_PHYS_ADDR
1538 if (cpu_has_64bits)
Thiemo Seufere30ec452008-01-28 20:05:38 +00001539 uasm_i_scd(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001540 else
1541# endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00001542 UASM_i_SC(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001543
1544 if (r10000_llsc_war())
Thiemo Seufere30ec452008-01-28 20:05:38 +00001545 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001546 else
Thiemo Seufere30ec452008-01-28 20:05:38 +00001547 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001548
1549# ifdef CONFIG_64BIT_PHYS_ADDR
1550 if (!cpu_has_64bits) {
Thiemo Seufere30ec452008-01-28 20:05:38 +00001551 /* no uasm_i_nop needed */
1552 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
1553 uasm_i_ori(p, pte, pte, hwmode);
1554 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
1555 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1556 /* no uasm_i_nop needed */
1557 uasm_i_lw(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001558 } else
Thiemo Seufere30ec452008-01-28 20:05:38 +00001559 uasm_i_nop(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001560# else
Thiemo Seufere30ec452008-01-28 20:05:38 +00001561 uasm_i_nop(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001562# endif
1563#else
1564# ifdef CONFIG_64BIT_PHYS_ADDR
1565 if (cpu_has_64bits)
Thiemo Seufere30ec452008-01-28 20:05:38 +00001566 uasm_i_sd(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001567 else
1568# endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00001569 UASM_i_SW(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001570
1571# ifdef CONFIG_64BIT_PHYS_ADDR
1572 if (!cpu_has_64bits) {
Thiemo Seufere30ec452008-01-28 20:05:38 +00001573 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1574 uasm_i_ori(p, pte, pte, hwmode);
1575 uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
1576 uasm_i_lw(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001577 }
1578# endif
1579#endif
1580}
1581
1582/*
1583 * Check if PTE is present, if not then jump to LABEL. PTR points to
1584 * the page table where this PTE is located, PTE will be re-loaded
1585 * with it's original value.
1586 */
Ralf Baechle234fcd12008-03-08 09:56:28 +00001587static void __cpuinit
David Daneybd1437e2009-05-08 15:10:50 -07001588build_pte_present(u32 **p, struct uasm_reloc **r,
David Daneybf286072011-07-05 16:34:46 -07001589 int pte, int ptr, int scratch, enum label_id lid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001590{
David Daneybf286072011-07-05 16:34:46 -07001591 int t = scratch >= 0 ? scratch : pte;
1592
Steven J. Hill05857c62012-09-13 16:51:46 -05001593 if (cpu_has_rixi) {
David Daneycc33ae42010-12-20 15:54:50 -08001594 if (use_bbit_insns()) {
1595 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
1596 uasm_i_nop(p);
1597 } else {
David Daneybf286072011-07-05 16:34:46 -07001598 uasm_i_andi(p, t, pte, _PAGE_PRESENT);
1599 uasm_il_beqz(p, r, t, lid);
1600 if (pte == t)
 1601				/* You lose the SMP race :-( */
1602 iPTE_LW(p, pte, ptr);
David Daneycc33ae42010-12-20 15:54:50 -08001603 }
David Daney6dd93442010-02-10 15:12:47 -08001604 } else {
David Daneybf286072011-07-05 16:34:46 -07001605 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_READ);
1606 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_READ);
1607 uasm_il_bnez(p, r, t, lid);
1608 if (pte == t)
 1609			/* You lose the SMP race :-( */
1610 iPTE_LW(p, pte, ptr);
David Daney6dd93442010-02-10 15:12:47 -08001611 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001612}
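#if 0
/*
 * Hedged C-level sketch (illustrative only, never compiled) of the
 * presence test emitted above; _PAGE_PRESENT and _PAGE_READ are the
 * usual MIPS software PTE bits.
 */
static inline int pte_fastpath_present(unsigned long pte, int have_rixi)
{
	if (have_rixi)
		/* With RI/XI, readability is separate; presence alone decides. */
		return pte & _PAGE_PRESENT;
	/* Without RI/XI the page must be both present and readable. */
	return (pte & (_PAGE_PRESENT | _PAGE_READ)) ==
	       (_PAGE_PRESENT | _PAGE_READ);
}
#endif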
1613
1614/* Make PTE valid, store result in PTR. */
Ralf Baechle234fcd12008-03-08 09:56:28 +00001615static void __cpuinit
Thiemo Seufere30ec452008-01-28 20:05:38 +00001616build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001617 unsigned int ptr)
1618{
Thiemo Seufer63b2d2f2005-04-28 08:52:57 +00001619 unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
1620
1621 iPTE_SW(p, r, pte, ptr, mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001622}
1623
1624/*
 1625 * Check if PTE can be written to; if not, branch to LABEL.
 1626 * Regardless, restore PTE with the value from PTR when done.
1627 */
Ralf Baechle234fcd12008-03-08 09:56:28 +00001628static void __cpuinit
David Daneybd1437e2009-05-08 15:10:50 -07001629build_pte_writable(u32 **p, struct uasm_reloc **r,
David Daneybf286072011-07-05 16:34:46 -07001630 unsigned int pte, unsigned int ptr, int scratch,
1631 enum label_id lid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001632{
David Daneybf286072011-07-05 16:34:46 -07001633 int t = scratch >= 0 ? scratch : pte;
1634
1635 uasm_i_andi(p, t, pte, _PAGE_PRESENT | _PAGE_WRITE);
1636 uasm_i_xori(p, t, t, _PAGE_PRESENT | _PAGE_WRITE);
1637 uasm_il_bnez(p, r, t, lid);
1638 if (pte == t)
 1639		/* You lose the SMP race :-( */
David Daneycc33ae42010-12-20 15:54:50 -08001640 iPTE_LW(p, pte, ptr);
David Daneybf286072011-07-05 16:34:46 -07001641 else
1642 uasm_i_nop(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001643}
1644
1645/* Make PTE writable, update software status bits as well, then store
1646 * at PTR.
1647 */
Ralf Baechle234fcd12008-03-08 09:56:28 +00001648static void __cpuinit
Thiemo Seufere30ec452008-01-28 20:05:38 +00001649build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001650 unsigned int ptr)
1651{
Thiemo Seufer63b2d2f2005-04-28 08:52:57 +00001652 unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
1653 | _PAGE_DIRTY);
1654
1655 iPTE_SW(p, r, pte, ptr, mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001656}
1657
1658/*
 1659 * Check if PTE can be modified; if not, branch to LABEL.
 1660 * Regardless, restore PTE with the value from PTR when done.
1661 */
Ralf Baechle234fcd12008-03-08 09:56:28 +00001662static void __cpuinit
David Daneybd1437e2009-05-08 15:10:50 -07001663build_pte_modifiable(u32 **p, struct uasm_reloc **r,
David Daneybf286072011-07-05 16:34:46 -07001664 unsigned int pte, unsigned int ptr, int scratch,
1665 enum label_id lid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001666{
David Daneycc33ae42010-12-20 15:54:50 -08001667 if (use_bbit_insns()) {
1668 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
1669 uasm_i_nop(p);
1670 } else {
David Daneybf286072011-07-05 16:34:46 -07001671 int t = scratch >= 0 ? scratch : pte;
1672 uasm_i_andi(p, t, pte, _PAGE_WRITE);
1673 uasm_il_beqz(p, r, t, lid);
1674 if (pte == t)
 1675			/* You lose the SMP race :-( */
1676 iPTE_LW(p, pte, ptr);
David Daneycc33ae42010-12-20 15:54:50 -08001677 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001678}
1679
David Daney826222842009-10-14 12:16:56 -07001680#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
David Daney3d8bfdd2010-12-21 14:19:11 -08001681
Linus Torvalds1da177e2005-04-16 15:20:36 -07001683/*
1684 * R3000 style TLB load/store/modify handlers.
1685 */
1686
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001687/*
1688 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1689 * Then it returns.
1690 */
Ralf Baechle234fcd12008-03-08 09:56:28 +00001691static void __cpuinit
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001692build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001693{
Thiemo Seufere30ec452008-01-28 20:05:38 +00001694 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1695 uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
1696 uasm_i_tlbwi(p);
1697 uasm_i_jr(p, tmp);
1698 uasm_i_rfe(p); /* branch delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001699}
1700
1701/*
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001702 * This places the pte into ENTRYLO0 and writes it with tlbwi
1703 * or tlbwr as appropriate. This is because the index register
1704 * may have the probe fail bit set as a result of a trap on a
1705 * kseg2 access, i.e. without refill. Then it returns.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001706 */
Ralf Baechle234fcd12008-03-08 09:56:28 +00001707static void __cpuinit
Thiemo Seufere30ec452008-01-28 20:05:38 +00001708build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1709 struct uasm_reloc **r, unsigned int pte,
1710 unsigned int tmp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001711{
Thiemo Seufere30ec452008-01-28 20:05:38 +00001712 uasm_i_mfc0(p, tmp, C0_INDEX);
1713 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1714 uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
1715 uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
1716 uasm_i_tlbwi(p); /* cp0 delay */
1717 uasm_i_jr(p, tmp);
1718 uasm_i_rfe(p); /* branch delay */
1719 uasm_l_r3000_write_probe_fail(l, *p);
1720 uasm_i_tlbwr(p); /* cp0 delay */
1721 uasm_i_jr(p, tmp);
1722 uasm_i_rfe(p); /* branch delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001723}
1724
Ralf Baechle234fcd12008-03-08 09:56:28 +00001725static void __cpuinit
Linus Torvalds1da177e2005-04-16 15:20:36 -07001726build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1727 unsigned int ptr)
1728{
1729 long pgdc = (long)pgd_current;
1730
Thiemo Seufere30ec452008-01-28 20:05:38 +00001731 uasm_i_mfc0(p, pte, C0_BADVADDR);
1732 uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
1733 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
1734 uasm_i_srl(p, pte, pte, 22); /* load delay */
1735 uasm_i_sll(p, pte, pte, 2);
1736 uasm_i_addu(p, ptr, ptr, pte);
1737 uasm_i_mfc0(p, pte, C0_CONTEXT);
1738 uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
1739 uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
1740 uasm_i_addu(p, ptr, ptr, pte);
1741 uasm_i_lw(p, pte, 0, ptr);
1742 uasm_i_tlbp(p); /* load delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001743}
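/*
 * Roughly, the sequence above computes (a sketch under the usual
 * R3000 two-level layout with 4k pages, not generated code):
 *
 *	pgd_entry = pgd_current[badvaddr >> 22];
 *	pte = *(u32 *)(pgd_entry + (c0_context & 0xffc));
 *
 * with C0_CONTEXT supplying the pre-shifted PTE index, and then
 * probes the TLB for the matching entry.
 */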
1744
Ralf Baechle234fcd12008-03-08 09:56:28 +00001745static void __cpuinit build_r3000_tlb_load_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001746{
1747 u32 *p = handle_tlbl;
Thiemo Seufere30ec452008-01-28 20:05:38 +00001748 struct uasm_label *l = labels;
1749 struct uasm_reloc *r = relocs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001750
1751 memset(handle_tlbl, 0, sizeof(handle_tlbl));
1752 memset(labels, 0, sizeof(labels));
1753 memset(relocs, 0, sizeof(relocs));
1754
1755 build_r3000_tlbchange_handler_head(&p, K0, K1);
David Daneybf286072011-07-05 16:34:46 -07001756 build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001757 uasm_i_nop(&p); /* load delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001758 build_make_valid(&p, &r, K0, K1);
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001759 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001760
Thiemo Seufere30ec452008-01-28 20:05:38 +00001761 uasm_l_nopage_tlbl(&l, p);
1762 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1763 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001764
1765 if ((p - handle_tlbl) > FASTPATH_SIZE)
1766 panic("TLB load handler fastpath space exceeded");
1767
Thiemo Seufere30ec452008-01-28 20:05:38 +00001768 uasm_resolve_relocs(relocs, labels);
1769 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1770 (unsigned int)(p - handle_tlbl));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001771
Ralf Baechlea2c763e2012-10-16 22:20:26 +02001772 dump_handler("r3000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001773}
1774
Ralf Baechle234fcd12008-03-08 09:56:28 +00001775static void __cpuinit build_r3000_tlb_store_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776{
1777 u32 *p = handle_tlbs;
Thiemo Seufere30ec452008-01-28 20:05:38 +00001778 struct uasm_label *l = labels;
1779 struct uasm_reloc *r = relocs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780
1781 memset(handle_tlbs, 0, sizeof(handle_tlbs));
1782 memset(labels, 0, sizeof(labels));
1783 memset(relocs, 0, sizeof(relocs));
1784
1785 build_r3000_tlbchange_handler_head(&p, K0, K1);
David Daneybf286072011-07-05 16:34:46 -07001786 build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001787 uasm_i_nop(&p); /* load delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001788 build_make_write(&p, &r, K0, K1);
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001789 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790
Thiemo Seufere30ec452008-01-28 20:05:38 +00001791 uasm_l_nopage_tlbs(&l, p);
1792 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1793 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001794
1795 if ((p - handle_tlbs) > FASTPATH_SIZE)
1796 panic("TLB store handler fastpath space exceeded");
1797
Thiemo Seufere30ec452008-01-28 20:05:38 +00001798 uasm_resolve_relocs(relocs, labels);
1799 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1800 (unsigned int)(p - handle_tlbs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001801
Ralf Baechlea2c763e2012-10-16 22:20:26 +02001802 dump_handler("r3000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001803}
1804
Ralf Baechle234fcd12008-03-08 09:56:28 +00001805static void __cpuinit build_r3000_tlb_modify_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001806{
1807 u32 *p = handle_tlbm;
Thiemo Seufere30ec452008-01-28 20:05:38 +00001808 struct uasm_label *l = labels;
1809 struct uasm_reloc *r = relocs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001810
1811 memset(handle_tlbm, 0, sizeof(handle_tlbm));
1812 memset(labels, 0, sizeof(labels));
1813 memset(relocs, 0, sizeof(relocs));
1814
1815 build_r3000_tlbchange_handler_head(&p, K0, K1);
Ralf Baechled954ffe2011-08-02 22:52:48 +01001816 build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001817 uasm_i_nop(&p); /* load delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001818 build_make_write(&p, &r, K0, K1);
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001819 build_r3000_pte_reload_tlbwi(&p, K0, K1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001820
Thiemo Seufere30ec452008-01-28 20:05:38 +00001821 uasm_l_nopage_tlbm(&l, p);
1822 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1823 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001824
1825 if ((p - handle_tlbm) > FASTPATH_SIZE)
1826 panic("TLB modify handler fastpath space exceeded");
1827
Thiemo Seufere30ec452008-01-28 20:05:38 +00001828 uasm_resolve_relocs(relocs, labels);
1829 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1830 (unsigned int)(p - handle_tlbm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831
Ralf Baechlea2c763e2012-10-16 22:20:26 +02001832 dump_handler("r3000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001833}
David Daney826222842009-10-14 12:16:56 -07001834#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001835
1836/*
1837 * R4000 style TLB load/store/modify handlers.
1838 */
David Daneybf286072011-07-05 16:34:46 -07001839static struct work_registers __cpuinit
Thiemo Seufere30ec452008-01-28 20:05:38 +00001840build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
David Daneybf286072011-07-05 16:34:46 -07001841 struct uasm_reloc **r)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001842{
David Daneybf286072011-07-05 16:34:46 -07001843 struct work_registers wr = build_get_work_registers(p);
1844
Ralf Baechle875d43e2005-09-03 15:56:16 -07001845#ifdef CONFIG_64BIT
David Daneybf286072011-07-05 16:34:46 -07001846 build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001847#else
David Daneybf286072011-07-05 16:34:46 -07001848 build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849#endif
1850
David Daneyaa1762f2012-10-17 00:48:10 +02001851#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daneyfd062c82009-05-27 17:47:44 -07001852 /*
1853 * For huge tlb entries, pmd doesn't contain an address but
1854 * instead contains the tlb pte. Check the PAGE_HUGE bit and
1855 * see if we need to jump to huge tlb processing.
1856 */
David Daneybf286072011-07-05 16:34:46 -07001857 build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
David Daneyfd062c82009-05-27 17:47:44 -07001858#endif
1859
David Daneybf286072011-07-05 16:34:46 -07001860 UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
1861 UASM_i_LW(p, wr.r2, 0, wr.r2);
1862 UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
1863 uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
1864 UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865
1866#ifdef CONFIG_SMP
Thiemo Seufere30ec452008-01-28 20:05:38 +00001867 uasm_l_smp_pgtable_change(l, *p);
1868#endif
David Daneybf286072011-07-05 16:34:46 -07001869 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
Maciej W. Rozycki8df5bea2006-08-23 14:26:50 +01001870 if (!m4kc_tlbp_war())
1871 build_tlb_probe_entry(p);
David Daneybf286072011-07-05 16:34:46 -07001872 return wr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001873}
1874
Ralf Baechle234fcd12008-03-08 09:56:28 +00001875static void __cpuinit
Thiemo Seufere30ec452008-01-28 20:05:38 +00001876build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
1877 struct uasm_reloc **r, unsigned int tmp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001878 unsigned int ptr)
1879{
Thiemo Seufere30ec452008-01-28 20:05:38 +00001880 uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
1881 uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001882 build_update_entries(p, tmp, ptr);
1883 build_tlb_write_entry(p, l, r, tlb_indexed);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001884 uasm_l_leave(l, *p);
David Daneybf286072011-07-05 16:34:46 -07001885 build_restore_work_registers(p);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001886 uasm_i_eret(p); /* return from trap */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001887
Ralf Baechle875d43e2005-09-03 15:56:16 -07001888#ifdef CONFIG_64BIT
David Daney1ec56322010-04-28 12:16:18 -07001889 build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001890#endif
1891}
1892
Ralf Baechle234fcd12008-03-08 09:56:28 +00001893static void __cpuinit build_r4000_tlb_load_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001894{
1895 u32 *p = handle_tlbl;
Thiemo Seufere30ec452008-01-28 20:05:38 +00001896 struct uasm_label *l = labels;
1897 struct uasm_reloc *r = relocs;
David Daneybf286072011-07-05 16:34:46 -07001898 struct work_registers wr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001899
1900 memset(handle_tlbl, 0, sizeof(handle_tlbl));
1901 memset(labels, 0, sizeof(labels));
1902 memset(relocs, 0, sizeof(relocs));
1903
1904 if (bcm1250_m3_war()) {
Ralf Baechle3d452852010-03-23 17:56:38 +01001905 unsigned int segbits = 44;
1906
1907 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1908 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001909 uasm_i_xor(&p, K0, K0, K1);
David Daney3be60222010-04-28 12:16:17 -07001910 uasm_i_dsrl_safe(&p, K1, K0, 62);
1911 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1912 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
Ralf Baechle3d452852010-03-23 17:56:38 +01001913 uasm_i_or(&p, K0, K0, K1);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001914 uasm_il_bnez(&p, &r, K0, label_leave);
1915 /* No need for uasm_i_nop */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001916 }
1917
David Daneybf286072011-07-05 16:34:46 -07001918 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
1919 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
Maciej W. Rozycki8df5bea2006-08-23 14:26:50 +01001920 if (m4kc_tlbp_war())
1921 build_tlb_probe_entry(&p);
David Daney6dd93442010-02-10 15:12:47 -08001922
Steven J. Hill05857c62012-09-13 16:51:46 -05001923 if (cpu_has_rixi) {
David Daney6dd93442010-02-10 15:12:47 -08001924 /*
1925 * If the page is not _PAGE_VALID, RI or XI could not
 1926		 * have triggered it. Skip the expensive test.
1927 */
David Daneycc33ae42010-12-20 15:54:50 -08001928 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07001929 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
David Daneycc33ae42010-12-20 15:54:50 -08001930 label_tlbl_goaround1);
1931 } else {
David Daneybf286072011-07-05 16:34:46 -07001932 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
1933 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
David Daneycc33ae42010-12-20 15:54:50 -08001934 }
David Daney6dd93442010-02-10 15:12:47 -08001935 uasm_i_nop(&p);
1936
1937 uasm_i_tlbr(&p);
Ralf Baechle73acc7d2013-06-20 14:56:17 +02001938
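		/*
		 * tlbr leaves a CP0 hazard before the EntryLo reads
		 * below; MIPS R2 cores clear it with an explicit ehb,
		 * while the OCTEON variants have no such hazard and
		 * skip the barrier.
		 */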
 1939		switch (current_cpu_type()) {
 1940		case CPU_CAVIUM_OCTEON:
 1941		case CPU_CAVIUM_OCTEON_PLUS:
 1942		case CPU_CAVIUM_OCTEON2:
 1943			break;
 1944
 1945		default:
 1946			if (cpu_has_mips_r2)
 1947				uasm_i_ehb(&p);
 1948			break;
 1949		}
1950
David Daney6dd93442010-02-10 15:12:47 -08001951 /* Examine entrylo 0 or 1 based on ptr. */
David Daneycc33ae42010-12-20 15:54:50 -08001952 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07001953 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
David Daneycc33ae42010-12-20 15:54:50 -08001954 } else {
David Daneybf286072011-07-05 16:34:46 -07001955 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
1956 uasm_i_beqz(&p, wr.r3, 8);
David Daneycc33ae42010-12-20 15:54:50 -08001957 }
David Daneybf286072011-07-05 16:34:46 -07001958		/* load it in the delay slot */
1959 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
1960 /* load it if ptr is odd */
1961 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
David Daney6dd93442010-02-10 15:12:47 -08001962 /*
David Daneybf286072011-07-05 16:34:46 -07001963 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
David Daney6dd93442010-02-10 15:12:47 -08001964 * XI must have triggered it.
1965 */
David Daneycc33ae42010-12-20 15:54:50 -08001966 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07001967 uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
1968 uasm_i_nop(&p);
David Daneycc33ae42010-12-20 15:54:50 -08001969 uasm_l_tlbl_goaround1(&l, p);
1970 } else {
David Daneybf286072011-07-05 16:34:46 -07001971 uasm_i_andi(&p, wr.r3, wr.r3, 2);
1972 uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
1973 uasm_i_nop(&p);
David Daneycc33ae42010-12-20 15:54:50 -08001974 }
David Daneybf286072011-07-05 16:34:46 -07001975 uasm_l_tlbl_goaround1(&l, p);
David Daney6dd93442010-02-10 15:12:47 -08001976 }
David Daneybf286072011-07-05 16:34:46 -07001977 build_make_valid(&p, &r, wr.r1, wr.r2);
1978 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
David Daneyaa1762f2012-10-17 00:48:10 +02001980#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daneyfd062c82009-05-27 17:47:44 -07001981 /*
1982 * This is the entry point when build_r4000_tlbchange_handler_head
1983 * spots a huge page.
1984 */
1985 uasm_l_tlb_huge_update(&l, p);
David Daneybf286072011-07-05 16:34:46 -07001986 iPTE_LW(&p, wr.r1, wr.r2);
1987 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
David Daneyfd062c82009-05-27 17:47:44 -07001988 build_tlb_probe_entry(&p);
David Daney6dd93442010-02-10 15:12:47 -08001989
Steven J. Hill05857c62012-09-13 16:51:46 -05001990 if (cpu_has_rixi) {
David Daney6dd93442010-02-10 15:12:47 -08001991 /*
1992 * If the page is not _PAGE_VALID, RI or XI could not
 1993		 * have triggered it. Skip the expensive test.
1994 */
David Daneycc33ae42010-12-20 15:54:50 -08001995 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07001996 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
David Daneycc33ae42010-12-20 15:54:50 -08001997 label_tlbl_goaround2);
1998 } else {
David Daneybf286072011-07-05 16:34:46 -07001999 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2000 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
David Daneycc33ae42010-12-20 15:54:50 -08002001 }
David Daney6dd93442010-02-10 15:12:47 -08002002 uasm_i_nop(&p);
2003
2004 uasm_i_tlbr(&p);
Ralf Baechle73acc7d2013-06-20 14:56:17 +02002005
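		/* Same tlbr/mfc0 hazard handling as in the base-page path above. */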
 2006		switch (current_cpu_type()) {
 2007		case CPU_CAVIUM_OCTEON:
 2008		case CPU_CAVIUM_OCTEON_PLUS:
 2009		case CPU_CAVIUM_OCTEON2:
 2010			break;
 2011
 2012		default:
 2013			if (cpu_has_mips_r2)
 2014				uasm_i_ehb(&p);
 2015			break;
 2016		}
2017
David Daney6dd93442010-02-10 15:12:47 -08002018 /* Examine entrylo 0 or 1 based on ptr. */
David Daneycc33ae42010-12-20 15:54:50 -08002019 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07002020 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
David Daneycc33ae42010-12-20 15:54:50 -08002021 } else {
David Daneybf286072011-07-05 16:34:46 -07002022 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2023 uasm_i_beqz(&p, wr.r3, 8);
David Daneycc33ae42010-12-20 15:54:50 -08002024 }
David Daneybf286072011-07-05 16:34:46 -07002025		/* load it in the delay slot */
2026 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2027 /* load it if ptr is odd */
2028 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
David Daney6dd93442010-02-10 15:12:47 -08002029 /*
David Daneybf286072011-07-05 16:34:46 -07002030 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
David Daney6dd93442010-02-10 15:12:47 -08002031 * XI must have triggered it.
2032 */
David Daneycc33ae42010-12-20 15:54:50 -08002033 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07002034 uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
David Daneycc33ae42010-12-20 15:54:50 -08002035 } else {
David Daneybf286072011-07-05 16:34:46 -07002036 uasm_i_andi(&p, wr.r3, wr.r3, 2);
2037 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
David Daneycc33ae42010-12-20 15:54:50 -08002038 }
David Daney0f4ccbc2011-09-16 18:06:02 -07002039 if (PM_DEFAULT_MASK == 0)
2040 uasm_i_nop(&p);
David Daney6dd93442010-02-10 15:12:47 -08002041 /*
 2042		 * We clobbered C0_PAGEMASK; restore it. On the other branch
2043 * it is restored in build_huge_tlb_write_entry.
2044 */
David Daneybf286072011-07-05 16:34:46 -07002045 build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
David Daney6dd93442010-02-10 15:12:47 -08002046
2047 uasm_l_tlbl_goaround2(&l, p);
2048 }
David Daneybf286072011-07-05 16:34:46 -07002049 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
2050 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
David Daneyfd062c82009-05-27 17:47:44 -07002051#endif
2052
Thiemo Seufere30ec452008-01-28 20:05:38 +00002053 uasm_l_nopage_tlbl(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002054 build_restore_work_registers(&p);
Steven J. Hill2a0b24f2013-03-25 12:15:55 -05002055#ifdef CONFIG_CPU_MICROMIPS
2056 if ((unsigned long)tlb_do_page_fault_0 & 1) {
2057 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2058 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2059 uasm_i_jr(&p, K0);
2060 } else
2061#endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00002062 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2063 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064
2065 if ((p - handle_tlbl) > FASTPATH_SIZE)
2066 panic("TLB load handler fastpath space exceeded");
2067
Thiemo Seufere30ec452008-01-28 20:05:38 +00002068 uasm_resolve_relocs(relocs, labels);
2069 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
2070 (unsigned int)(p - handle_tlbl));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071
Ralf Baechlea2c763e2012-10-16 22:20:26 +02002072 dump_handler("r4000_tlb_load", handle_tlbl, ARRAY_SIZE(handle_tlbl));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002073}
2074
Ralf Baechle234fcd12008-03-08 09:56:28 +00002075static void __cpuinit build_r4000_tlb_store_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002076{
2077 u32 *p = handle_tlbs;
Thiemo Seufere30ec452008-01-28 20:05:38 +00002078 struct uasm_label *l = labels;
2079 struct uasm_reloc *r = relocs;
David Daneybf286072011-07-05 16:34:46 -07002080 struct work_registers wr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002081
2082 memset(handle_tlbs, 0, sizeof(handle_tlbs));
2083 memset(labels, 0, sizeof(labels));
2084 memset(relocs, 0, sizeof(relocs));
2085
David Daneybf286072011-07-05 16:34:46 -07002086 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2087 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
Maciej W. Rozycki8df5bea2006-08-23 14:26:50 +01002088 if (m4kc_tlbp_war())
2089 build_tlb_probe_entry(&p);
David Daneybf286072011-07-05 16:34:46 -07002090 build_make_write(&p, &r, wr.r1, wr.r2);
2091 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002092
David Daneyaa1762f2012-10-17 00:48:10 +02002093#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daneyfd062c82009-05-27 17:47:44 -07002094 /*
2095 * This is the entry point when
2096 * build_r4000_tlbchange_handler_head spots a huge page.
2097 */
2098 uasm_l_tlb_huge_update(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002099 iPTE_LW(&p, wr.r1, wr.r2);
2100 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
David Daneyfd062c82009-05-27 17:47:44 -07002101 build_tlb_probe_entry(&p);
David Daneybf286072011-07-05 16:34:46 -07002102 uasm_i_ori(&p, wr.r1, wr.r1,
David Daneyfd062c82009-05-27 17:47:44 -07002103 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
David Daneybf286072011-07-05 16:34:46 -07002104 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
David Daneyfd062c82009-05-27 17:47:44 -07002105#endif
2106
Thiemo Seufere30ec452008-01-28 20:05:38 +00002107 uasm_l_nopage_tlbs(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002108 build_restore_work_registers(&p);
Steven J. Hill2a0b24f2013-03-25 12:15:55 -05002109#ifdef CONFIG_CPU_MICROMIPS
2110 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2111 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2112 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2113 uasm_i_jr(&p, K0);
2114 } else
2115#endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00002116 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2117 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002118
2119 if ((p - handle_tlbs) > FASTPATH_SIZE)
2120 panic("TLB store handler fastpath space exceeded");
2121
Thiemo Seufere30ec452008-01-28 20:05:38 +00002122 uasm_resolve_relocs(relocs, labels);
2123 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
2124 (unsigned int)(p - handle_tlbs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002125
Ralf Baechlea2c763e2012-10-16 22:20:26 +02002126 dump_handler("r4000_tlb_store", handle_tlbs, ARRAY_SIZE(handle_tlbs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002127}
2128
Ralf Baechle234fcd12008-03-08 09:56:28 +00002129static void __cpuinit build_r4000_tlb_modify_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002130{
2131 u32 *p = handle_tlbm;
Thiemo Seufere30ec452008-01-28 20:05:38 +00002132 struct uasm_label *l = labels;
2133 struct uasm_reloc *r = relocs;
David Daneybf286072011-07-05 16:34:46 -07002134 struct work_registers wr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002135
2136 memset(handle_tlbm, 0, sizeof(handle_tlbm));
2137 memset(labels, 0, sizeof(labels));
2138 memset(relocs, 0, sizeof(relocs));
2139
David Daneybf286072011-07-05 16:34:46 -07002140 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2141 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
Maciej W. Rozycki8df5bea2006-08-23 14:26:50 +01002142 if (m4kc_tlbp_war())
2143 build_tlb_probe_entry(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002144 /* Present and writable bits set, set accessed and dirty bits. */
David Daneybf286072011-07-05 16:34:46 -07002145 build_make_write(&p, &r, wr.r1, wr.r2);
2146 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002147
David Daneyaa1762f2012-10-17 00:48:10 +02002148#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daneyfd062c82009-05-27 17:47:44 -07002149 /*
2150 * This is the entry point when
2151 * build_r4000_tlbchange_handler_head spots a huge page.
2152 */
2153 uasm_l_tlb_huge_update(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002154 iPTE_LW(&p, wr.r1, wr.r2);
2155 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
David Daneyfd062c82009-05-27 17:47:44 -07002156 build_tlb_probe_entry(&p);
David Daneybf286072011-07-05 16:34:46 -07002157 uasm_i_ori(&p, wr.r1, wr.r1,
David Daneyfd062c82009-05-27 17:47:44 -07002158 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
David Daneybf286072011-07-05 16:34:46 -07002159 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2);
David Daneyfd062c82009-05-27 17:47:44 -07002160#endif
2161
Thiemo Seufere30ec452008-01-28 20:05:38 +00002162 uasm_l_nopage_tlbm(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002163 build_restore_work_registers(&p);
Steven J. Hill2a0b24f2013-03-25 12:15:55 -05002164#ifdef CONFIG_CPU_MICROMIPS
2165 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2166 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2167 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2168 uasm_i_jr(&p, K0);
2169 } else
2170#endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00002171 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2172 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002173
2174 if ((p - handle_tlbm) > FASTPATH_SIZE)
2175 panic("TLB modify handler fastpath space exceeded");
2176
Thiemo Seufere30ec452008-01-28 20:05:38 +00002177 uasm_resolve_relocs(relocs, labels);
2178 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2179 (unsigned int)(p - handle_tlbm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002180
Ralf Baechlea2c763e2012-10-16 22:20:26 +02002181 dump_handler("r4000_tlb_modify", handle_tlbm, ARRAY_SIZE(handle_tlbm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002182}
2183
Jonas Gorskia3d90862013-06-21 17:48:48 +00002184static void __cpuinit flush_tlb_handlers(void)
2185{
2186 local_flush_icache_range((unsigned long)handle_tlbl,
2187 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
2188 local_flush_icache_range((unsigned long)handle_tlbs,
2189 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
2190 local_flush_icache_range((unsigned long)handle_tlbm,
2191 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
2192#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
2193 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd_array,
 2194				  (unsigned long)tlbmiss_handler_setup_pgd_array + sizeof(tlbmiss_handler_setup_pgd_array));
2195#endif
2196}
2197
Ralf Baechle234fcd12008-03-08 09:56:28 +00002198void __cpuinit build_tlb_refill_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002199{
2200 /*
 2201	 * The refill handler is generated per CPU; multi-node systems
2202 * may have local storage for it. The other handlers are only
2203 * needed once.
2204 */
 2205	static int run_once;
2206
Ralf Baechlea2c763e2012-10-16 22:20:26 +02002207 output_pgtable_bits_defines();
2208
David Daney1ec56322010-04-28 12:16:18 -07002209#ifdef CONFIG_64BIT
2210 check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
2211#endif
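	/*
	 * A note on the check above: PGDIR_SHIFT + PGD_ORDER +
	 * PAGE_SHIFT - 3 is the number of virtual-address bits the
	 * page tables can map (a PGD page holds 2^(PAGE_SHIFT - 3)
	 * eight-byte entries); if the CPU implements more vmbits than
	 * that, the refill path must catch accesses to the unmappable
	 * high segments.
	 */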
2212
Ralf Baechle10cc3522007-10-11 23:46:15 +01002213 switch (current_cpu_type()) {
Linus Torvalds1da177e2005-04-16 15:20:36 -07002214 case CPU_R2000:
2215 case CPU_R3000:
2216 case CPU_R3000A:
2217 case CPU_R3081E:
2218 case CPU_TX3912:
2219 case CPU_TX3922:
2220 case CPU_TX3927:
David Daney826222842009-10-14 12:16:56 -07002221#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
Huacai Chen87599342013-03-17 11:49:38 +00002222 if (cpu_has_local_ebase)
2223 build_r3000_tlb_refill_handler();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002224 if (!run_once) {
Huacai Chen87599342013-03-17 11:49:38 +00002225 if (!cpu_has_local_ebase)
2226 build_r3000_tlb_refill_handler();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002227 build_r3000_tlb_load_handler();
2228 build_r3000_tlb_store_handler();
2229 build_r3000_tlb_modify_handler();
Jonas Gorskia3d90862013-06-21 17:48:48 +00002230 flush_tlb_handlers();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002231 run_once++;
2232 }
David Daney826222842009-10-14 12:16:56 -07002233#else
2234 panic("No R3000 TLB refill handler");
2235#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002236 break;
2237
2238 case CPU_R6000:
2239 case CPU_R6000A:
2240 panic("No R6000 TLB refill handler yet");
2241 break;
2242
2243 case CPU_R8000:
2244 panic("No R8000 TLB refill handler yet");
2245 break;
2246
2247 default:
Linus Torvalds1da177e2005-04-16 15:20:36 -07002248 if (!run_once) {
David Daneybf286072011-07-05 16:34:46 -07002249 scratch_reg = allocate_kscratch();
David Daney3d8bfdd2010-12-21 14:19:11 -08002250#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
2251 build_r4000_setup_pgd();
2252#endif
Linus Torvalds1da177e2005-04-16 15:20:36 -07002253 build_r4000_tlb_load_handler();
2254 build_r4000_tlb_store_handler();
2255 build_r4000_tlb_modify_handler();
Huacai Chen87599342013-03-17 11:49:38 +00002256 if (!cpu_has_local_ebase)
2257 build_r4000_tlb_refill_handler();
Jonas Gorskia3d90862013-06-21 17:48:48 +00002258 flush_tlb_handlers();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002259 run_once++;
2260 }
Huacai Chen87599342013-03-17 11:49:38 +00002261 if (cpu_has_local_ebase)
2262 build_r4000_tlb_refill_handler();
Linus Torvalds1da177e2005-04-16 15:20:36 -07002263 }
2264}