/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 * Copyright (C) 2011  MIPS Technologies, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/cache.h>

#include <asm/cacheflush.h>
#include <asm/cpu-type.h>
#include <asm/pgtable.h>
#include <asm/war.h>
#include <asm/uasm.h>
#include <asm/setup.h>

static int mips_xpa_disabled;

static int __init xpa_disable(char *s)
{
        mips_xpa_disabled = 1;

        return 1;
}

__setup("noxpa", xpa_disable);
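
/*
 * Booting with "noxpa" on the kernel command line sets
 * mips_xpa_disabled; build_update_entries() below checks it before
 * emitting the mthc0-based XPA sequences.
 */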

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains in normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);

struct work_registers {
        int r1;
        int r2;
        int r3;
};

struct tlb_reg_save {
        unsigned long a;
        unsigned long b;
} ____cacheline_aligned_in_smp;
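
/*
 * handler_reg_save below provides one save slot per CPU; the cacheline
 * alignment keeps each CPU's slot in its own line, so the TLB handlers
 * of different CPUs never bounce a shared cacheline.
 */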
static struct tlb_reg_save handler_reg_save[NR_CPUS];

static inline int r45k_bvahwbug(void)
{
        /* XXX: We should probe for the presence of this bug, but we don't. */
        return 0;
}

static inline int r4k_250MHZhwbug(void)
{
        /* XXX: We should probe for the presence of this bug, but we don't. */
        return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
        return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
        return R10000_LLSC_WAR;
}

static int use_bbit_insns(void)
{
        switch (current_cpu_type()) {
        case CPU_CAVIUM_OCTEON:
        case CPU_CAVIUM_OCTEON_PLUS:
        case CPU_CAVIUM_OCTEON2:
        case CPU_CAVIUM_OCTEON3:
                return 1;
        default:
                return 0;
        }
}

static int use_lwx_insns(void)
{
        switch (current_cpu_type()) {
        case CPU_CAVIUM_OCTEON2:
        case CPU_CAVIUM_OCTEON3:
                return 1;
        default:
                return 0;
        }
}
#if defined(CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE) && \
    CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE > 0
static bool scratchpad_available(void)
{
        return true;
}
static int scratchpad_offset(int i)
{
        /*
         * CVMSEG starts at address -32768 and extends for
         * CAVIUM_OCTEON_CVMSEG_SIZE 128 byte cache lines.
         */
        i += 1; /* Kernel use starts at the top and works down. */
        return CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE * 128 - (8 * i) - 32768;
}
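
/*
 * Worked example (illustrative; assumes CONFIG_CAVIUM_OCTEON_CVMSEG_SIZE
 * is 2, i.e. two 128-byte cache lines of CVMSEG):
 *      scratchpad_offset(0) = 2 * 128 - 8 * 1 - 32768 = -32520
 *      scratchpad_offset(1) = 2 * 128 - 8 * 2 - 32768 = -32528
 * Each caller thus gets an 8-byte slot, allocated downward from the
 * top of CVMSEG.
 */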
#else
static bool scratchpad_available(void)
{
        return false;
}
static int scratchpad_offset(int i)
{
        BUG();
        /* Really unreachable, but some GCC versions evidently want this. */
        return 0;
}
#endif
/*
 * Found by experiment: at least some revisions of the 4kc throw a
 * machine check exception under some circumstances, triggered by
 * invalid values in the index register.  Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr, avoids the invalid index register values.  Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int m4kc_tlbp_war(void)
{
        return (current_cpu_data.processor_id & 0xffff00) ==
               (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
        label_second_part = 1,
        label_leave,
        label_vmalloc,
        label_vmalloc_done,
        label_tlbw_hazard_0,
        label_split = label_tlbw_hazard_0 + 8,
        label_tlbl_goaround1,
        label_tlbl_goaround2,
        label_nopage_tlbl,
        label_nopage_tlbs,
        label_nopage_tlbm,
        label_smp_pgtable_change,
        label_r3000_write_probe_fail,
        label_large_segbits_fault,
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
/* _tlbw_hazard_x is handled differently. */
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
UASM_L_LA(_tlb_huge_update)
#endif

static int hazard_instance;

static void uasm_bgezl_hazard(u32 **p, struct uasm_reloc **r, int instance)
{
        switch (instance) {
        case 0 ... 7:
                uasm_il_bgezl(p, r, 0, label_tlbw_hazard_0 + instance);
                return;
        default:
                BUG();
        }
}

static void uasm_bgezl_label(struct uasm_label **l, u32 **p, int instance)
{
        switch (instance) {
        case 0 ... 7:
                uasm_build_label(l, *p, label_tlbw_hazard_0 + instance);
                break;
        default:
                BUG();
        }
}
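
/*
 * These two are used as a pair by build_tlb_write_entry() below: the
 * bgezl emitted by uasm_bgezl_hazard() is an always-taken branch (its
 * rs operand is $0) whose delay slot holds the tlbw, and
 * uasm_bgezl_label() binds the branch target directly after it.  The
 * branch fills the mtc0 hazard slot and saves the two nops otherwise
 * needed after the tlbw.
 */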

/*
 * pgtable bits are assigned dynamically depending on processor features
 * and statically based on kernel configuration.  This spits out the
 * actual values the kernel is using.  It is required to make sense of
 * disassembled TLB exception handlers.
 */
static void output_pgtable_bits_defines(void)
{
#define pr_define(fmt, ...)                                     \
        pr_debug("#define " fmt, ##__VA_ARGS__)

        pr_debug("#include <asm/asm.h>\n");
        pr_debug("#include <asm/regdef.h>\n");
        pr_debug("\n");

        pr_define("_PAGE_PRESENT_SHIFT %d\n", _PAGE_PRESENT_SHIFT);
        pr_define("_PAGE_NO_READ_SHIFT %d\n", _PAGE_NO_READ_SHIFT);
        pr_define("_PAGE_WRITE_SHIFT %d\n", _PAGE_WRITE_SHIFT);
        pr_define("_PAGE_ACCESSED_SHIFT %d\n", _PAGE_ACCESSED_SHIFT);
        pr_define("_PAGE_MODIFIED_SHIFT %d\n", _PAGE_MODIFIED_SHIFT);
#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        pr_define("_PAGE_HUGE_SHIFT %d\n", _PAGE_HUGE_SHIFT);
#endif
#ifdef _PAGE_NO_EXEC_SHIFT
        if (cpu_has_rixi)
                pr_define("_PAGE_NO_EXEC_SHIFT %d\n", _PAGE_NO_EXEC_SHIFT);
#endif
        pr_define("_PAGE_GLOBAL_SHIFT %d\n", _PAGE_GLOBAL_SHIFT);
        pr_define("_PAGE_VALID_SHIFT %d\n", _PAGE_VALID_SHIFT);
        pr_define("_PAGE_DIRTY_SHIFT %d\n", _PAGE_DIRTY_SHIFT);
        pr_define("_PFN_SHIFT %d\n", _PFN_SHIFT);
        pr_debug("\n");
}

static inline void dump_handler(const char *symbol, const u32 *handler, int count)
{
        int i;

        pr_debug("LEAF(%s)\n", symbol);

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");

        for (i = 0; i < count; i++)
                pr_debug("\t.word\t0x%08x\t\t# %p\n", handler[i], &handler[i]);

        pr_debug("\t.set\tpop\n");

        pr_debug("\tEND(%s)\n", symbol);
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0              26
#define K1              27

/* Some CP0 registers */
#define C0_INDEX        0, 0
#define C0_ENTRYLO0     2, 0
#define C0_TCBIND       2, 2
#define C0_ENTRYLO1     3, 0
#define C0_CONTEXT      4, 0
#define C0_PAGEMASK     5, 0
#define C0_PWBASE       5, 5
#define C0_PWFIELD      5, 6
#define C0_PWSIZE       5, 7
#define C0_PWCTL        6, 6
#define C0_BADVADDR     8, 0
#define C0_PGD          9, 7
#define C0_ENTRYHI      10, 0
#define C0_EPC          14, 0
#define C0_XCONTEXT     20, 0
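
/*
 * Each define expands to a "register, select" pair for the uasm
 * coprocessor 0 accessors; e.g. UASM_i_MFC0(p, reg, C0_ENTRYHI)
 * assembles a move from $10, select 0.
 */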

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128];

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128];
static struct uasm_reloc relocs[128];

static int check_for_high_segbits;
static bool fill_includes_sw_bits;

static unsigned int kscratch_used_mask;

static inline int __maybe_unused c0_kscratch(void)
{
        switch (current_cpu_type()) {
        case CPU_XLP:
        case CPU_XLR:
                return 22;
        default:
                return 31;
        }
}

static int allocate_kscratch(void)
{
        int r;
        unsigned int a = cpu_data[0].kscratch_mask & ~kscratch_used_mask;

        r = ffs(a);

        if (r == 0)
                return -1;

        r--; /* make it zero based */

        kscratch_used_mask |= (1 << r);

        return r;
}
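
/*
 * A sketch of the intended use (the actual callers live later in this
 * file):
 *
 *      reg = allocate_kscratch();
 *      if (reg < 0)
 *              fall back to the memory save area, i.e.
 *              handler_reg_save / scratchpad_offset();
 *
 * Each successful call permanently reserves one KScratch register in
 * kscratch_used_mask.
 */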

static int scratch_reg;
static int pgd_reg;
enum vmalloc64_mode {not_refill, refill_scratch, refill_noscratch};

static struct work_registers build_get_work_registers(u32 **p)
{
        struct work_registers r;

        if (scratch_reg >= 0) {
                /* Save in CPU local C0_KScratch? */
                UASM_i_MTC0(p, 1, c0_kscratch(), scratch_reg);
                r.r1 = K0;
                r.r2 = K1;
                r.r3 = 1;
                return r;
        }

        if (num_possible_cpus() > 1) {
                /* Get smp_processor_id */
                UASM_i_CPUID_MFC0(p, K0, SMP_CPUID_REG);
                UASM_i_SRL_SAFE(p, K0, K0, SMP_CPUID_REGSHIFT);

                /* handler_reg_save index in K0 */
                UASM_i_SLL(p, K0, K0, ilog2(sizeof(struct tlb_reg_save)));

                UASM_i_LA(p, K1, (long)&handler_reg_save);
                UASM_i_ADDU(p, K0, K0, K1);
        } else {
                UASM_i_LA(p, K0, (long)&handler_reg_save);
        }
        /* K0 now points to save area, save $1 and $2  */
        UASM_i_SW(p, 1, offsetof(struct tlb_reg_save, a), K0);
        UASM_i_SW(p, 2, offsetof(struct tlb_reg_save, b), K0);

        r.r1 = K1;
        r.r2 = 1;
        r.r3 = 2;
        return r;
}

static void build_restore_work_registers(u32 **p)
{
        if (scratch_reg >= 0) {
                uasm_i_ehb(p);
                UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
                return;
        }
        /* K0 already points to save area, restore $1 and $2  */
        UASM_i_LW(p, 1, offsetof(struct tlb_reg_save, a), K0);
        UASM_i_LW(p, 2, offsetof(struct tlb_reg_save, b), K0);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT

/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 * so we cannot do r3000 under these circumstances.
 *
 * Declare pgd_current here instead of including mmu_context.h to avoid
 * type conflicts for tlbmiss_handler_setup_pgd.
 */
extern unsigned long pgd_current[];

/*
 * The R3000 TLB handler is simple.
 */
static void build_r3000_tlb_refill_handler(void)
{
        long pgdc = (long)pgd_current;
        u32 *p;

        memset(tlb_handler, 0, sizeof(tlb_handler));
        p = tlb_handler;

        uasm_i_mfc0(&p, K0, C0_BADVADDR);
        uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
        uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
        uasm_i_srl(&p, K0, K0, 22); /* load delay */
        uasm_i_sll(&p, K0, K0, 2);
        uasm_i_addu(&p, K1, K1, K0);
        uasm_i_mfc0(&p, K0, C0_CONTEXT);
        uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
        uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
        uasm_i_addu(&p, K1, K1, K0);
        uasm_i_lw(&p, K0, 0, K1);
        uasm_i_nop(&p); /* load delay */
        uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
        uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
        uasm_i_tlbwr(&p); /* cp0 delay */
        uasm_i_jr(&p, K1);
        uasm_i_rfe(&p); /* branch delay */

        if (p > tlb_handler + 32)
                panic("TLB refill handler space exceeded");

        pr_debug("Wrote TLB refill handler (%u instructions).\n",
                 (unsigned int)(p - tlb_handler));

        memcpy((void *)ebase, tlb_handler, 0x80);
        local_flush_icache_range(ebase, ebase + 0x80);

        dump_handler("r3000_tlb_refill", (u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The R4000 TLB handler is much more complicated.  We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into the
 * other one.  To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64];

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction.  This is not really correct as the stalling
 * instruction can modify the address used to access the JTLB.  The failure
 * symptom is that the TLBP instruction will use an address created for the
 * stalling instruction and not the address held in C0_ENHI and thus report
 * the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This erratum also exists on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __maybe_unused build_tlb_probe_entry(u32 **p)
{
        switch (current_cpu_type()) {
        /* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
        case CPU_R4600:
        case CPU_R4700:
        case CPU_R5000:
        case CPU_NEVADA:
                uasm_i_nop(p);
                uasm_i_tlbp(p);
                break;

        default:
                uasm_i_tlbp(p);
                break;
        }
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void build_tlb_write_entry(u32 **p, struct uasm_label **l,
                                  struct uasm_reloc **r,
                                  enum tlb_write_entry wmode)
{
        void(*tlbw)(u32 **) = NULL;

        switch (wmode) {
        case tlb_random: tlbw = uasm_i_tlbwr; break;
        case tlb_indexed: tlbw = uasm_i_tlbwi; break;
        }

        if (cpu_has_mips_r2_r6) {
                if (cpu_has_mips_r2_exec_hazard)
                        uasm_i_ehb(p);
                tlbw(p);
                return;
        }

        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                /*
                 * This branch uses up a mtc0 hazard nop slot and saves
                 * two nops after the tlbw instruction.
                 */
                uasm_bgezl_hazard(p, r, hazard_instance);
                tlbw(p);
                uasm_bgezl_label(l, p, hazard_instance);
                hazard_instance++;
                uasm_i_nop(p);
                break;

        case CPU_R4600:
        case CPU_R4700:
                uasm_i_nop(p);
                tlbw(p);
                uasm_i_nop(p);
                break;

        case CPU_R5000:
        case CPU_NEVADA:
                uasm_i_nop(p); /* QED specifies 2 nops hazard */
                uasm_i_nop(p); /* QED specifies 2 nops hazard */
                tlbw(p);
                break;

        case CPU_R4300:
        case CPU_5KC:
        case CPU_TX49XX:
        case CPU_PR4450:
        case CPU_XLR:
                uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_R16000:
        case CPU_4KC:
        case CPU_4KEC:
        case CPU_M14KC:
        case CPU_M14KEC:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_4KSC:
        case CPU_20KC:
        case CPU_25KF:
        case CPU_BMIPS32:
        case CPU_BMIPS3300:
        case CPU_BMIPS4350:
        case CPU_BMIPS4380:
        case CPU_BMIPS5000:
        case CPU_LOONGSON2:
        case CPU_LOONGSON3:
        case CPU_R5500:
                if (m4kc_tlbp_war())
                        uasm_i_nop(p);
        case CPU_ALCHEMY:
                tlbw(p);
                break;

        case CPU_RM7000:
                uasm_i_nop(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                break;

        case CPU_VR4131:
        case CPU_VR4133:
        case CPU_R5432:
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_JZRISC:
                tlbw(p);
                uasm_i_nop(p);
                break;

        default:
                panic("No TLB refill handler yet (CPU type: %d)",
                      current_cpu_type());
                break;
        }
}

static __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
                                                        unsigned int reg)
{
        if (_PAGE_GLOBAL_SHIFT == 0) {
                /* pte_t is already in EntryLo format */
                return;
        }

        if (cpu_has_rixi && _PAGE_NO_EXEC) {
                if (fill_includes_sw_bits) {
                        UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL));
                } else {
                        UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
                        UASM_i_ROTR(p, reg, reg,
                                    ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
                }
        } else {
#ifdef CONFIG_PHYS_ADDR_T_64BIT
                uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
                UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
        }
}
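
/*
 * The ROTR trick relies on the RIXI PTE layout placing _PAGE_NO_EXEC
 * and _PAGE_NO_READ directly below _PAGE_GLOBAL: rotating right by
 * ilog2(_PAGE_GLOBAL) strips the software-only bits and wraps the
 * no-exec/no-read bits around into the top of EntryLo, where the
 * hardware expects its XI/RI bits.
 */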

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT

static void build_restore_pagemask(u32 **p, struct uasm_reloc **r,
                                   unsigned int tmp, enum label_id lid,
                                   int restore_scratch)
{
        if (restore_scratch) {
                /* Reset default page size */
                if (PM_DEFAULT_MASK >> 16) {
                        uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
                        uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
                        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
                        uasm_il_b(p, r, lid);
                } else if (PM_DEFAULT_MASK) {
                        uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
                        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
                        uasm_il_b(p, r, lid);
                } else {
                        uasm_i_mtc0(p, 0, C0_PAGEMASK);
                        uasm_il_b(p, r, lid);
                }
                if (scratch_reg >= 0) {
                        uasm_i_ehb(p);
                        UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
                } else {
                        UASM_i_LW(p, 1, scratchpad_offset(0), 0);
                }
        } else {
                /* Reset default page size */
                if (PM_DEFAULT_MASK >> 16) {
                        uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
                        uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
                        uasm_il_b(p, r, lid);
                        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
                } else if (PM_DEFAULT_MASK) {
                        uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
                        uasm_il_b(p, r, lid);
                        uasm_i_mtc0(p, tmp, C0_PAGEMASK);
                } else {
                        uasm_il_b(p, r, lid);
                        uasm_i_mtc0(p, 0, C0_PAGEMASK);
                }
        }
}

static void build_huge_tlb_write_entry(u32 **p, struct uasm_label **l,
                                       struct uasm_reloc **r,
                                       unsigned int tmp,
                                       enum tlb_write_entry wmode,
                                       int restore_scratch)
{
        /* Set huge page tlb entry size */
        uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
        uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
        uasm_i_mtc0(p, tmp, C0_PAGEMASK);

        build_tlb_write_entry(p, l, r, wmode);

        build_restore_pagemask(p, r, tmp, label_leave, restore_scratch);
}

/*
 * Check if a huge PTE is present; if so, jump to LABEL.
 */
static void
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
                  unsigned int pmd, int lid)
{
        UASM_i_LW(p, tmp, 0, pmd);
        if (use_bbit_insns()) {
                uasm_il_bbit1(p, r, tmp, ilog2(_PAGE_HUGE), lid);
        } else {
                uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
                uasm_il_bnez(p, r, tmp, lid);
        }
}

static void build_huge_update_entries(u32 **p, unsigned int pte,
                                      unsigned int tmp)
{
        int small_sequence;

        /*
         * A huge PTE describes an area the size of the
         * configured huge page size.  This is twice the
         * size of the large TLB entry we intend to use.
         * A TLB entry half the size of the configured
         * huge page size is configured into entrylo0
         * and entrylo1 to cover the contiguous huge PTE
         * address space.
         */
        small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

        /* We can clobber tmp.  It isn't used after this. */
        if (!small_sequence)
                uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

        build_convert_pte_to_entrylo(p, pte);
        UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
        /* convert to entrylo1 */
        if (small_sequence)
                UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
        else
                UASM_i_ADDU(p, pte, pte, tmp);

        UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}
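
/*
 * Worked example (assuming 4 KiB base pages and 2 MiB huge pages):
 * each EntryLo maps half the huge page, i.e. HPAGE_SIZE / 2 bytes or
 * HPAGE_SIZE >> 13 base-page frames.  With the PFN field starting at
 * bit 6 of EntryLo, the odd-entry increment is
 * (HPAGE_SIZE >> 13) << 6 == HPAGE_SIZE >> 7 == 0x4000.  That fits in
 * an immediate, so small_sequence is true and a single addiu suffices.
 */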

static void build_huge_handler_tail(u32 **p, struct uasm_reloc **r,
                                    struct uasm_label **l,
                                    unsigned int pte,
                                    unsigned int ptr,
                                    unsigned int flush)
{
#ifdef CONFIG_SMP
        UASM_i_SC(p, pte, 0, ptr);
        uasm_il_beqz(p, r, pte, label_tlb_huge_update);
        UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
        UASM_i_SW(p, pte, 0, ptr);
#endif
        if (cpu_has_ftlb && flush) {
                BUG_ON(!cpu_has_tlbinv);

                UASM_i_MFC0(p, ptr, C0_ENTRYHI);
                uasm_i_ori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
                UASM_i_MTC0(p, ptr, C0_ENTRYHI);
                build_tlb_write_entry(p, l, r, tlb_indexed);

                uasm_i_xori(p, ptr, ptr, MIPS_ENTRYHI_EHINV);
                UASM_i_MTC0(p, ptr, C0_ENTRYHI);
                build_huge_update_entries(p, pte, ptr);
                build_huge_tlb_write_entry(p, l, r, pte, tlb_random, 0);

                return;
        }

        build_huge_update_entries(p, pte, ptr);
        build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed, 0);
}
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
        long pgdc = (long)pgd_current;
#endif
        /*
         * The vmalloc handling is not in the hotpath.
         */
        uasm_i_dmfc0(p, tmp, C0_BADVADDR);

        if (check_for_high_segbits) {
                /*
                 * The kernel currently implicitly assumes that the
                 * MIPS SEGBITS parameter for the processor is
                 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
                 * allocate virtual addresses outside the maximum
                 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS).  But
                 * that doesn't prevent user code from accessing the
                 * higher xuseg addresses.  Here, we make sure that
                 * everything but the lower xuseg addresses goes down
                 * the module_alloc/vmalloc path.
                 */
                uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
                uasm_il_bnez(p, r, ptr, label_vmalloc);
        } else {
                uasm_il_bltz(p, r, tmp, label_vmalloc);
        }
        /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

        if (pgd_reg != -1) {
                /* pgd is in pgd_reg */
                if (cpu_has_ldpte)
                        UASM_i_MFC0(p, ptr, C0_PWBASE);
                else
                        UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
        } else {
#if defined(CONFIG_MIPS_PGD_C0_CONTEXT)
                /*
                 * &pgd << 11 stored in CONTEXT [23..63].
                 */
                UASM_i_MFC0(p, ptr, C0_CONTEXT);

                /* Clear lower 23 bits of context. */
                uasm_i_dins(p, ptr, 0, 0, 23);

                /* 1 0 1 0 1 << 6  xkphys cached */
                uasm_i_ori(p, ptr, ptr, 0x540);
                uasm_i_drotr(p, ptr, ptr, 11);
#elif defined(CONFIG_SMP)
                UASM_i_CPUID_MFC0(p, ptr, SMP_CPUID_REG);
                uasm_i_dsrl_safe(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
                UASM_i_LA_mostly(p, tmp, pgdc);
                uasm_i_daddu(p, ptr, ptr, tmp);
                uasm_i_dmfc0(p, tmp, C0_BADVADDR);
                uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
                UASM_i_LA_mostly(p, ptr, pgdc);
                uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif
        }

        uasm_l_vmalloc_done(l, *p);

        /* get pgd offset in bytes */
        uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PMD_FOLDED
        uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
        uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}

/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                        unsigned int bvaddr, unsigned int ptr,
                        enum vmalloc64_mode mode)
{
        long swpd = (long)swapper_pg_dir;
        int single_insn_swpd;
        int did_vmalloc_branch = 0;

        single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

        uasm_l_vmalloc(l, *p);

        if (mode != not_refill && check_for_high_segbits) {
                if (single_insn_swpd) {
                        uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
                        uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
                        did_vmalloc_branch = 1;
                        /* fall through */
                } else {
                        uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
                }
        }
        if (!did_vmalloc_branch) {
                if (single_insn_swpd) {
                        uasm_il_b(p, r, label_vmalloc_done);
                        uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
                } else {
                        UASM_i_LA_mostly(p, ptr, swpd);
                        uasm_il_b(p, r, label_vmalloc_done);
                        if (uasm_in_compat_space_p(swpd))
                                uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
                        else
                                uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
                }
        }
        if (mode != not_refill && check_for_high_segbits) {
                uasm_l_large_segbits_fault(l, *p);
                /*
                 * We get here if we are an xsseg address, or if we are
                 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS)
                 * boundary.
                 *
                 * Ignoring xsseg (assume it is disabled, so it would
                 * generate address errors), the only remaining
                 * possibility is the upper xuseg addresses.  On
                 * processors with TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS,
                 * these addresses would have taken an address error.
                 * We try to mimic that here by taking a load/istream
                 * page fault.
                 */
                UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
                uasm_i_jr(p, ptr);

                if (mode == refill_scratch) {
                        if (scratch_reg >= 0) {
                                uasm_i_ehb(p);
                                UASM_i_MFC0(p, 1, c0_kscratch(), scratch_reg);
                        } else {
                                UASM_i_LW(p, 1, scratchpad_offset(0), 0);
                        }
                } else {
                        uasm_i_nop(p);
                }
        }
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
        if (pgd_reg != -1) {
                /* pgd is in pgd_reg */
                uasm_i_mfc0(p, ptr, c0_kscratch(), pgd_reg);
                uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        } else {
                long pgdc = (long)pgd_current;

                /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
                uasm_i_mfc0(p, ptr, SMP_CPUID_REG);
                UASM_i_LA_mostly(p, tmp, pgdc);
                uasm_i_srl(p, ptr, ptr, SMP_CPUID_PTRSHIFT);
                uasm_i_addu(p, ptr, tmp, ptr);
#else
                UASM_i_LA_mostly(p, ptr, pgdc);
#endif
                uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
                uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
        }
        uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
        uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
        uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

static void build_adjust_context(u32 **p, unsigned int ctx)
{
        unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
        unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

        switch (current_cpu_type()) {
        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4131:
        case CPU_VR4181:
        case CPU_VR4181A:
        case CPU_VR4133:
                shift += 2;
                break;

        default:
                break;
        }

        if (shift)
                UASM_i_SRL(p, ctx, ctx, shift);
        uasm_i_andi(p, ctx, ctx, mask);
}
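
/*
 * Worked example (assuming a 32-bit kernel with 4 KiB pages and
 * 4-byte PTEs, i.e. PTE_T_LOG2 == 2, PAGE_SHIFT == 12 and
 * PTRS_PER_PTE == 1024):
 *      shift = 4 - 3 + 12 - 12 = 1
 *      mask  = (1024 / 2 - 1) << 3 = 0xff8
 * so the context register is shifted down by one and masked to the
 * byte offset of an even/odd PTE pair within the page table.
 */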

static void build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
        /*
         * Bug workaround for the Nevada.  It seems as if under certain
         * circumstances the move from cp0_context might produce a
         * bogus result when the mfc0 instruction and its consumer are
         * in a different cacheline or a load instruction, probably any
         * memory reference, is between them.
         */
        switch (current_cpu_type()) {
        case CPU_NEVADA:
                UASM_i_LW(p, ptr, 0, ptr);
                GET_CONTEXT(p, tmp); /* get context reg */
                break;

        default:
                GET_CONTEXT(p, tmp); /* get context reg */
                UASM_i_LW(p, ptr, 0, ptr);
                break;
        }

        build_adjust_context(p, tmp);
        UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void build_update_entries(u32 **p, unsigned int tmp, unsigned int ptep)
{
        int pte_off_even = 0;
        int pte_off_odd = sizeof(pte_t);

#if defined(CONFIG_CPU_MIPS32) && defined(CONFIG_PHYS_ADDR_T_64BIT)
        /* The low 32 bits of EntryLo is stored in pte_high */
        pte_off_even += offsetof(pte_t, pte_high);
        pte_off_odd += offsetof(pte_t, pte_high);
#endif

        if (IS_ENABLED(CONFIG_XPA)) {
                uasm_i_lw(p, tmp, pte_off_even, ptep); /* even pte */
                UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
                UASM_i_MTC0(p, tmp, C0_ENTRYLO0);

                if (cpu_has_xpa && !mips_xpa_disabled) {
                        uasm_i_lw(p, tmp, 0, ptep);
                        uasm_i_ext(p, tmp, tmp, 0, 24);
                        uasm_i_mthc0(p, tmp, C0_ENTRYLO0);
                }

                uasm_i_lw(p, tmp, pte_off_odd, ptep); /* odd pte */
                UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL));
                UASM_i_MTC0(p, tmp, C0_ENTRYLO1);

                if (cpu_has_xpa && !mips_xpa_disabled) {
                        uasm_i_lw(p, tmp, sizeof(pte_t), ptep);
                        uasm_i_ext(p, tmp, tmp, 0, 24);
                        uasm_i_mthc0(p, tmp, C0_ENTRYLO1);
                }
                return;
        }

        UASM_i_LW(p, tmp, pte_off_even, ptep); /* get even pte */
        UASM_i_LW(p, ptep, pte_off_odd, ptep); /* get odd pte */
        if (r45k_bvahwbug())
                build_tlb_probe_entry(p);
        build_convert_pte_to_entrylo(p, tmp);
        if (r4k_250MHZhwbug())
                UASM_i_MTC0(p, 0, C0_ENTRYLO0);
        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
        build_convert_pte_to_entrylo(p, ptep);
        if (r45k_bvahwbug())
                uasm_i_mfc0(p, tmp, C0_INDEX);
        if (r4k_250MHZhwbug())
                UASM_i_MTC0(p, 0, C0_ENTRYLO1);
        UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
}
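
/*
 * A MIPS TLB entry maps an aligned pair of virtual pages, which is why
 * the handler always loads two PTEs: the even page's PTE feeds
 * EntryLo0 and the odd page's PTE, at ptep + sizeof(pte_t), feeds
 * EntryLo1.
 */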

struct mips_huge_tlb_info {
        int huge_pte;
        int restore_scratch;
        bool need_reload_pte;
};

static struct mips_huge_tlb_info
build_fast_tlb_refill_handler (u32 **p, struct uasm_label **l,
                               struct uasm_reloc **r, unsigned int tmp,
                               unsigned int ptr, int c0_scratch_reg)
{
        struct mips_huge_tlb_info rv;
        unsigned int even, odd;
        int vmalloc_branch_delay_filled = 0;
        const int scratch = 1; /* Our extra working register */

        rv.huge_pte = scratch;
        rv.restore_scratch = 0;
        rv.need_reload_pte = false;

        if (check_for_high_segbits) {
                UASM_i_MFC0(p, tmp, C0_BADVADDR);

                if (pgd_reg != -1)
                        UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
                else
                        UASM_i_MFC0(p, ptr, C0_CONTEXT);

                if (c0_scratch_reg >= 0)
                        UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
                else
                        UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

                uasm_i_dsrl_safe(p, scratch, tmp,
                                 PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
                uasm_il_bnez(p, r, scratch, label_vmalloc);

                if (pgd_reg == -1) {
                        vmalloc_branch_delay_filled = 1;
                        /* Clear lower 23 bits of context. */
                        uasm_i_dins(p, ptr, 0, 0, 23);
                }
        } else {
                if (pgd_reg != -1)
                        UASM_i_MFC0(p, ptr, c0_kscratch(), pgd_reg);
                else
                        UASM_i_MFC0(p, ptr, C0_CONTEXT);

                UASM_i_MFC0(p, tmp, C0_BADVADDR);

                if (c0_scratch_reg >= 0)
                        UASM_i_MTC0(p, scratch, c0_kscratch(), c0_scratch_reg);
                else
                        UASM_i_SW(p, scratch, scratchpad_offset(0), 0);

                if (pgd_reg == -1)
                        /* Clear lower 23 bits of context. */
                        uasm_i_dins(p, ptr, 0, 0, 23);

                uasm_il_bltz(p, r, tmp, label_vmalloc);
        }

        if (pgd_reg == -1) {
                vmalloc_branch_delay_filled = 1;
                /* 1 0 1 0 1 << 6  xkphys cached */
                uasm_i_ori(p, ptr, ptr, 0x540);
                uasm_i_drotr(p, ptr, ptr, 11);
        }

#ifdef __PAGETABLE_PMD_FOLDED
#define LOC_PTEP scratch
#else
#define LOC_PTEP ptr
#endif

        if (!vmalloc_branch_delay_filled)
                /* get pgd offset in bytes */
                uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

        uasm_l_vmalloc_done(l, *p);

        /*
         *                         tmp          ptr
         * fall-through case =   badvaddr  *pgd_current
         * vmalloc case      =   badvaddr  swapper_pg_dir
         */

        if (vmalloc_branch_delay_filled)
                /* get pgd offset in bytes */
                uasm_i_dsrl_safe(p, scratch, tmp, PGDIR_SHIFT - 3);

#ifdef __PAGETABLE_PMD_FOLDED
        GET_CONTEXT(p, tmp); /* get context reg */
#endif
        uasm_i_andi(p, scratch, scratch, (PTRS_PER_PGD - 1) << 3);

        if (use_lwx_insns()) {
                UASM_i_LWX(p, LOC_PTEP, scratch, ptr);
        } else {
                uasm_i_daddu(p, ptr, ptr, scratch); /* add in pgd offset */
                uasm_i_ld(p, LOC_PTEP, 0, ptr); /* get pmd pointer */
        }

#ifndef __PAGETABLE_PMD_FOLDED
        /* get pmd offset in bytes */
        uasm_i_dsrl_safe(p, scratch, tmp, PMD_SHIFT - 3);
        uasm_i_andi(p, scratch, scratch, (PTRS_PER_PMD - 1) << 3);
        GET_CONTEXT(p, tmp); /* get context reg */

        if (use_lwx_insns()) {
                UASM_i_LWX(p, scratch, scratch, ptr);
        } else {
                uasm_i_daddu(p, ptr, ptr, scratch); /* add in pmd offset */
                UASM_i_LW(p, scratch, 0, ptr);
        }
#endif
        /* Adjust the context during the load latency. */
        build_adjust_context(p, tmp);

#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
        uasm_il_bbit1(p, r, scratch, ilog2(_PAGE_HUGE), label_tlb_huge_update);
        /*
         * In the LWX case we don't want to do the load in the
         * delay slot.  It cannot issue in the same cycle and may be
         * speculative and unneeded.
         */
        if (use_lwx_insns())
                uasm_i_nop(p);
#endif /* CONFIG_MIPS_HUGE_TLB_SUPPORT */

        /* build_update_entries */
        if (use_lwx_insns()) {
                even = ptr;
                odd = tmp;
                UASM_i_LWX(p, even, scratch, tmp);
                UASM_i_ADDIU(p, tmp, tmp, sizeof(pte_t));
                UASM_i_LWX(p, odd, scratch, tmp);
        } else {
                UASM_i_ADDU(p, ptr, scratch, tmp); /* add in offset */
                even = tmp;
                odd = ptr;
                UASM_i_LW(p, even, 0, ptr); /* get even pte */
                UASM_i_LW(p, odd, sizeof(pte_t), ptr); /* get odd pte */
        }
        if (cpu_has_rixi) {
                uasm_i_drotr(p, even, even, ilog2(_PAGE_GLOBAL));
                UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
                uasm_i_drotr(p, odd, odd, ilog2(_PAGE_GLOBAL));
        } else {
                uasm_i_dsrl_safe(p, even, even, ilog2(_PAGE_GLOBAL));
                UASM_i_MTC0(p, even, C0_ENTRYLO0); /* load it */
                uasm_i_dsrl_safe(p, odd, odd, ilog2(_PAGE_GLOBAL));
        }
        UASM_i_MTC0(p, odd, C0_ENTRYLO1); /* load it */

        if (c0_scratch_reg >= 0) {
                uasm_i_ehb(p);
                UASM_i_MFC0(p, scratch, c0_kscratch(), c0_scratch_reg);
                build_tlb_write_entry(p, l, r, tlb_random);
                uasm_l_leave(l, *p);
                rv.restore_scratch = 1;
        } else if (PAGE_SHIFT == 14 || PAGE_SHIFT == 13) {
                build_tlb_write_entry(p, l, r, tlb_random);
                uasm_l_leave(l, *p);
                UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
        } else {
                UASM_i_LW(p, scratch, scratchpad_offset(0), 0);
                build_tlb_write_entry(p, l, r, tlb_random);
                uasm_l_leave(l, *p);
                rv.restore_scratch = 1;
        }

        uasm_i_eret(p); /* return from trap */

        return rv;
}
1266
David Daneye6f72d32009-05-20 11:40:58 -07001267/*
1268 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
1269 * because EXL == 0. If we wrap, we can also use the 32 instruction
1270 * slots before the XTLB refill exception handler which belong to the
1271 * unused TLB refill exception.
1272 */
1273#define MIPS64_REFILL_INSNS 32
1274
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001275static void build_r4000_tlb_refill_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001276{
1277 u32 *p = tlb_handler;
Thiemo Seufere30ec452008-01-28 20:05:38 +00001278 struct uasm_label *l = labels;
1279 struct uasm_reloc *r = relocs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001280 u32 *f;
1281 unsigned int final_len;
Ralf Baechle4a9040f2011-03-29 10:54:54 +02001282 struct mips_huge_tlb_info htlb_info __maybe_unused;
1283 enum vmalloc64_mode vmalloc_mode __maybe_unused;
David Daney18280ed2014-05-28 23:52:13 +02001284
Linus Torvalds1da177e2005-04-16 15:20:36 -07001285 memset(tlb_handler, 0, sizeof(tlb_handler));
1286 memset(labels, 0, sizeof(labels));
1287 memset(relocs, 0, sizeof(relocs));
1288 memset(final_handler, 0, sizeof(final_handler));
1289
David Daney18280ed2014-05-28 23:52:13 +02001290 if (IS_ENABLED(CONFIG_64BIT) && (scratch_reg >= 0 || scratchpad_available()) && use_bbit_insns()) {
David Daney2c8c53e2010-12-27 18:07:57 -08001291 htlb_info = build_fast_tlb_refill_handler(&p, &l, &r, K0, K1,
1292 scratch_reg);
1293 vmalloc_mode = refill_scratch;
1294 } else {
1295 htlb_info.huge_pte = K0;
1296 htlb_info.restore_scratch = 0;
David Daney9e0f1622014-10-20 15:34:23 -07001297 htlb_info.need_reload_pte = true;
David Daney2c8c53e2010-12-27 18:07:57 -08001298 vmalloc_mode = refill_noscratch;
1299 /*
1300 * create the plain linear handler
1301 */
1302 if (bcm1250_m3_war()) {
1303 unsigned int segbits = 44;
1304
1305 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1306 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
1307 uasm_i_xor(&p, K0, K0, K1);
1308 uasm_i_dsrl_safe(&p, K1, K0, 62);
1309 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
1310 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
1311 uasm_i_or(&p, K0, K0, K1);
1312 uasm_il_bnez(&p, &r, K0, label_leave);
1313 /* No need for uasm_i_nop */
1314 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001315
Ralf Baechle875d43e2005-09-03 15:56:16 -07001316#ifdef CONFIG_64BIT
David Daney2c8c53e2010-12-27 18:07:57 -08001317 build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001318#else
David Daney2c8c53e2010-12-27 18:07:57 -08001319 build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001320#endif
1321
David Daneyaa1762f2012-10-17 00:48:10 +02001322#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daney2c8c53e2010-12-27 18:07:57 -08001323 build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
David Daneyfd062c82009-05-27 17:47:44 -07001324#endif
1325
David Daney2c8c53e2010-12-27 18:07:57 -08001326 build_get_ptep(&p, K0, K1);
1327 build_update_entries(&p, K0, K1);
1328 build_tlb_write_entry(&p, &l, &r, tlb_random);
1329 uasm_l_leave(&l, p);
1330 uasm_i_eret(&p); /* return from trap */
1331 }
David Daneyaa1762f2012-10-17 00:48:10 +02001332#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daneyfd062c82009-05-27 17:47:44 -07001333 uasm_l_tlb_huge_update(&l, p);
David Daney9e0f1622014-10-20 15:34:23 -07001334 if (htlb_info.need_reload_pte)
1335 UASM_i_LW(&p, htlb_info.huge_pte, 0, K1);
David Daney2c8c53e2010-12-27 18:07:57 -08001336 build_huge_update_entries(&p, htlb_info.huge_pte, K1);
1337 build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random,
1338 htlb_info.restore_scratch);
David Daneyfd062c82009-05-27 17:47:44 -07001339#endif
1340
Ralf Baechle875d43e2005-09-03 15:56:16 -07001341#ifdef CONFIG_64BIT
David Daney2c8c53e2010-12-27 18:07:57 -08001342 build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, vmalloc_mode);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001343#endif
1344
1345 /*
1346	 * Overflow check: For the 64-bit handler, we need at least one
1347	 * free instruction slot for the wrap-around branch. In the
1348	 * worst case, if the intended insertion point is a delay slot, we
Matt LaPlante4b3f6862006-10-03 22:21:02 +02001349 * need three, with the second nop'ed and the third being
Linus Torvalds1da177e2005-04-16 15:20:36 -07001350 * unused.
1351 */
Ralf Baechle14bd8c02013-09-25 18:21:26 +02001352 switch (boot_cpu_type()) {
1353 default:
1354 if (sizeof(long) == 4) {
1355 case CPU_LOONGSON2:
1356		/* Loongson2 ebase is different from r4k, so we have more space */
1357 if ((p - tlb_handler) > 64)
1358 panic("TLB refill handler space exceeded");
1359 /*
1360 * Now fold the handler in the TLB refill handler space.
1361 */
1362 f = final_handler;
1363 /* Simplest case, just copy the handler. */
1364 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1365 final_len = p - tlb_handler;
1366 break;
1367 } else {
1368 if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
1369 || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
1370 && uasm_insn_has_bdelay(relocs,
1371 tlb_handler + MIPS64_REFILL_INSNS - 3)))
1372 panic("TLB refill handler space exceeded");
1373 /*
1374 * Now fold the handler in the TLB refill handler space.
1375 */
1376 f = final_handler + MIPS64_REFILL_INSNS;
1377 if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
1378 /* Just copy the handler. */
1379 uasm_copy_handler(relocs, labels, tlb_handler, p, f);
1380 final_len = p - tlb_handler;
1381 } else {
David Daneyaa1762f2012-10-17 00:48:10 +02001382#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
Ralf Baechle14bd8c02013-09-25 18:21:26 +02001383 const enum label_id ls = label_tlb_huge_update;
David Daney95affdd2009-05-20 11:40:59 -07001384#else
Ralf Baechle14bd8c02013-09-25 18:21:26 +02001385 const enum label_id ls = label_vmalloc;
David Daney95affdd2009-05-20 11:40:59 -07001386#endif
Ralf Baechle14bd8c02013-09-25 18:21:26 +02001387 u32 *split;
1388 int ov = 0;
1389 int i;
David Daney95affdd2009-05-20 11:40:59 -07001390
Ralf Baechle14bd8c02013-09-25 18:21:26 +02001391 for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
1392 ;
1393 BUG_ON(i == ARRAY_SIZE(labels));
1394 split = labels[i].addr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001395
Ralf Baechle14bd8c02013-09-25 18:21:26 +02001396 /*
1397			 * See if we have overflowed one way or the other.
1398 */
1399 if (split > tlb_handler + MIPS64_REFILL_INSNS ||
1400 split < p - MIPS64_REFILL_INSNS)
1401 ov = 1;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001402
Ralf Baechle14bd8c02013-09-25 18:21:26 +02001403 if (ov) {
1404 /*
1405 * Split two instructions before the end. One
1406 * for the branch and one for the instruction
1407 * in the delay slot.
1408 */
1409 split = tlb_handler + MIPS64_REFILL_INSNS - 2;
David Daney95affdd2009-05-20 11:40:59 -07001410
Ralf Baechle14bd8c02013-09-25 18:21:26 +02001411 /*
1412 * If the branch would fall in a delay slot,
1413 * we must back up an additional instruction
1414 * so that it is no longer in a delay slot.
1415 */
1416 if (uasm_insn_has_bdelay(relocs, split - 1))
1417 split--;
1418 }
1419 /* Copy first part of the handler. */
1420 uasm_copy_handler(relocs, labels, tlb_handler, split, f);
1421 f += split - tlb_handler;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001422
Ralf Baechle14bd8c02013-09-25 18:21:26 +02001423 if (ov) {
1424 /* Insert branch. */
1425 uasm_l_split(&l, final_handler);
1426 uasm_il_b(&f, &r, label_split);
1427 if (uasm_insn_has_bdelay(relocs, split))
1428 uasm_i_nop(&f);
1429 else {
1430 uasm_copy_handler(relocs, labels,
1431 split, split + 1, f);
1432 uasm_move_labels(labels, f, f + 1, -1);
1433 f++;
1434 split++;
1435 }
1436 }
1437
1438 /* Copy the rest of the handler. */
1439 uasm_copy_handler(relocs, labels, split, p, final_handler);
1440 final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
1441 (p - split);
David Daney95affdd2009-05-20 11:40:59 -07001442 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001443 }
Ralf Baechle14bd8c02013-09-25 18:21:26 +02001444 break;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001445 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001446
Thiemo Seufere30ec452008-01-28 20:05:38 +00001447 uasm_resolve_relocs(relocs, labels);
1448 pr_debug("Wrote TLB refill handler (%u instructions).\n",
1449 final_len);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001450
Ralf Baechle91b05e62006-03-29 18:53:00 +01001451 memcpy((void *)ebase, final_handler, 0x100);
Leonid Yegoshin10620802014-07-11 15:18:05 -07001452 local_flush_icache_range(ebase, ebase + 0x100);
Franck Bui-Huu92b1e6a2007-10-18 09:11:17 +02001453
Ralf Baechlea2c763e2012-10-16 22:20:26 +02001454 dump_handler("r4000_tlb_refill", (u32 *)ebase, 64);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001455}
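/*
 * Resulting vector layout for a 64-bit kernel (each vector is 0x80
 * bytes, i.e. 32 instructions); the memcpy() above installs both
 * halves of final_handler[] at once:
 *
 *	ebase + 0x000: wrapped tail, if any   (final_handler[0..31])
 *	ebase + 0x080: XTLB entry point, head (final_handler[32..63])
 *
 * A wrapped handler's head ends with a branch back into the
 * otherwise unused 32-bit refill slots at ebase.
 */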
1456
Huacai Chen380cd582016-03-03 09:45:12 +08001457static void setup_pw(void)
1458{
1459 unsigned long pgd_i, pgd_w;
1460#ifndef __PAGETABLE_PMD_FOLDED
1461 unsigned long pmd_i, pmd_w;
1462#endif
1463 unsigned long pt_i, pt_w;
1464 unsigned long pte_i, pte_w;
1465#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1466 unsigned long psn;
1467
1468 psn = ilog2(_PAGE_HUGE); /* bit used to indicate huge page */
1469#endif
1470 pgd_i = PGDIR_SHIFT; /* 1st level PGD */
1471#ifndef __PAGETABLE_PMD_FOLDED
1472 pgd_w = PGDIR_SHIFT - PMD_SHIFT + PGD_ORDER;
1473
1474 pmd_i = PMD_SHIFT; /* 2nd level PMD */
1475 pmd_w = PMD_SHIFT - PAGE_SHIFT;
1476#else
1477 pgd_w = PGDIR_SHIFT - PAGE_SHIFT + PGD_ORDER;
1478#endif
1479
1480 pt_i = PAGE_SHIFT; /* 3rd level PTE */
1481 pt_w = PAGE_SHIFT - 3;
1482
1483 pte_i = ilog2(_PAGE_GLOBAL);
1484 pte_w = 0;
1485
1486#ifndef __PAGETABLE_PMD_FOLDED
1487 write_c0_pwfield(pgd_i << 24 | pmd_i << 12 | pt_i << 6 | pte_i);
1488 write_c0_pwsize(1 << 30 | pgd_w << 24 | pmd_w << 12 | pt_w << 6 | pte_w);
1489#else
1490 write_c0_pwfield(pgd_i << 24 | pt_i << 6 | pte_i);
1491 write_c0_pwsize(1 << 30 | pgd_w << 24 | pt_w << 6 | pte_w);
1492#endif
1493
1494#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
1495 write_c0_pwctl(1 << 6 | psn);
1496#endif
1497 write_c0_kpgd(swapper_pg_dir);
1498 kscratch_used_mask |= (1 << 7); /* KScratch6 is used for KPGD */
1499}
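/*
 * Worked example of the packing above, assuming a 64-bit kernel with
 * 16KB pages (PAGE_SHIFT == 14), PGD_ORDER == 0 and a folded PMD,
 * so that PGDIR_SHIFT == 25:
 *
 *	pgd_i = 25, pgd_w = 25 - 14 + 0 = 11
 *	pt_i  = 14, pt_w  = 14 - 3     = 11
 *
 *	PWField = 25 << 24 | 14 << 6 | ilog2(_PAGE_GLOBAL)
 *	PWSize  =  1 << 30 | 11 << 24 | 11 << 6
 *
 * where 1 << 30 sets the PS (pointer size) bit: directory entries
 * are pointers, not PTEs.
 */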
1500
1501static void build_loongson3_tlb_refill_handler(void)
1502{
1503 u32 *p = tlb_handler;
1504 struct uasm_label *l = labels;
1505 struct uasm_reloc *r = relocs;
1506
1507 memset(labels, 0, sizeof(labels));
1508 memset(relocs, 0, sizeof(relocs));
1509 memset(tlb_handler, 0, sizeof(tlb_handler));
1510
1511 if (check_for_high_segbits) {
1512 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
1513 uasm_i_dsrl_safe(&p, K1, K0, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
1514 uasm_il_beqz(&p, &r, K1, label_vmalloc);
1515 uasm_i_nop(&p);
1516
1517 uasm_il_bgez(&p, &r, K0, label_large_segbits_fault);
1518 uasm_i_nop(&p);
1519 uasm_l_vmalloc(&l, p);
1520 }
1521
1522 uasm_i_dmfc0(&p, K1, C0_PGD);
1523
1524 uasm_i_lddir(&p, K0, K1, 3); /* global page dir */
1525#ifndef __PAGETABLE_PMD_FOLDED
1526 uasm_i_lddir(&p, K1, K0, 1); /* middle page dir */
1527#endif
1528 uasm_i_ldpte(&p, K1, 0); /* even */
1529 uasm_i_ldpte(&p, K1, 1); /* odd */
1530 uasm_i_tlbwr(&p);
1531
1532 /* restore page mask */
1533 if (PM_DEFAULT_MASK >> 16) {
1534 uasm_i_lui(&p, K0, PM_DEFAULT_MASK >> 16);
1535 uasm_i_ori(&p, K0, K0, PM_DEFAULT_MASK & 0xffff);
1536 uasm_i_mtc0(&p, K0, C0_PAGEMASK);
1537 } else if (PM_DEFAULT_MASK) {
1538 uasm_i_ori(&p, K0, 0, PM_DEFAULT_MASK);
1539 uasm_i_mtc0(&p, K0, C0_PAGEMASK);
1540 } else {
1541 uasm_i_mtc0(&p, 0, C0_PAGEMASK);
1542 }
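	/*
	 * For instance, with the standard R4000-style PageMask
	 * encoding this emits:
	 *
	 *	 4KB pages: PM_DEFAULT_MASK == 0x0     -> mtc0 $0
	 *	16KB pages: PM_DEFAULT_MASK == 0x6000  -> single ori
	 *	64KB pages: PM_DEFAULT_MASK == 0x1e000 -> lui + ori pair
	 */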
1543
1544 uasm_i_eret(&p);
1545
1546 if (check_for_high_segbits) {
1547 uasm_l_large_segbits_fault(&l, p);
1548 UASM_i_LA(&p, K1, (unsigned long)tlb_do_page_fault_0);
1549 uasm_i_jr(&p, K1);
1550 uasm_i_nop(&p);
1551 }
1552
1553 uasm_resolve_relocs(relocs, labels);
1554 memcpy((void *)(ebase + 0x80), tlb_handler, 0x80);
1555 local_flush_icache_range(ebase + 0x80, ebase + 0x100);
1556 dump_handler("loongson3_tlb_refill", (u32 *)(ebase + 0x80), 32);
1557}
1558
Jayachandran C6ba045f2013-06-23 17:16:19 +00001559extern u32 handle_tlbl[], handle_tlbl_end[];
1560extern u32 handle_tlbs[], handle_tlbs_end[];
1561extern u32 handle_tlbm[], handle_tlbm_end[];
Steven J. Hill7bb39402014-04-10 14:06:17 -05001562extern u32 tlbmiss_handler_setup_pgd_start[], tlbmiss_handler_setup_pgd[];
1563extern u32 tlbmiss_handler_setup_pgd_end[];
David Daney3d8bfdd2010-12-21 14:19:11 -08001564
Jayachandran Cf4ae17a2013-09-25 16:28:04 +05301565static void build_setup_pgd(void)
David Daney3d8bfdd2010-12-21 14:19:11 -08001566{
1567 const int a0 = 4;
Jayachandran Cf4ae17a2013-09-25 16:28:04 +05301568 const int __maybe_unused a1 = 5;
1569 const int __maybe_unused a2 = 6;
Steven J. Hill7bb39402014-04-10 14:06:17 -05001570 u32 *p = tlbmiss_handler_setup_pgd_start;
Jayachandran C6ba045f2013-06-23 17:16:19 +00001571 const int tlbmiss_handler_setup_pgd_size =
Steven J. Hill7bb39402014-04-10 14:06:17 -05001572 tlbmiss_handler_setup_pgd_end - tlbmiss_handler_setup_pgd_start;
Jayachandran Cf4ae17a2013-09-25 16:28:04 +05301573#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
1574 long pgdc = (long)pgd_current;
1575#endif
David Daney3d8bfdd2010-12-21 14:19:11 -08001576
Jayachandran C6ba045f2013-06-23 17:16:19 +00001577 memset(tlbmiss_handler_setup_pgd, 0, tlbmiss_handler_setup_pgd_size *
1578 sizeof(tlbmiss_handler_setup_pgd[0]));
David Daney3d8bfdd2010-12-21 14:19:11 -08001579 memset(labels, 0, sizeof(labels));
1580 memset(relocs, 0, sizeof(relocs));
David Daney3d8bfdd2010-12-21 14:19:11 -08001581 pgd_reg = allocate_kscratch();
Jayachandran Cf4ae17a2013-09-25 16:28:04 +05301582#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
David Daney3d8bfdd2010-12-21 14:19:11 -08001583 if (pgd_reg == -1) {
Jayachandran Cf4ae17a2013-09-25 16:28:04 +05301584 struct uasm_label *l = labels;
1585 struct uasm_reloc *r = relocs;
1586
David Daney3d8bfdd2010-12-21 14:19:11 -08001587 /* PGD << 11 in c0_Context */
1588 /*
1589		 * If it is a ckseg0 address, convert it to a physical
1590		 * address: shifting right (arithmetically) by 29 and
1591		 * adding 4 results in zero for these addresses, e.g.
1592		 * (long)0xffffffff80000000 >> 29 == -4 and -4 + 4 == 0.
1593		 */
1594 UASM_i_SRA(&p, a1, a0, 29);
1595 UASM_i_ADDIU(&p, a1, a1, 4);
1596 uasm_il_bnez(&p, &r, a1, label_tlbl_goaround1);
1597 uasm_i_nop(&p);
1598 uasm_i_dinsm(&p, a0, 0, 29, 64 - 29);
1599 uasm_l_tlbl_goaround1(&l, p);
1600 UASM_i_SLL(&p, a0, a0, 11);
David Daney3d8bfdd2010-12-21 14:19:11 -08001601 UASM_i_MTC0(&p, a0, C0_CONTEXT);
Dmitry Korotindd8f65a2019-06-24 19:05:27 +00001602 uasm_i_jr(&p, 31);
1603 uasm_i_ehb(&p);
David Daney3d8bfdd2010-12-21 14:19:11 -08001604 } else {
1605 /* PGD in c0_KScratch */
Huacai Chen380cd582016-03-03 09:45:12 +08001606 if (cpu_has_ldpte)
1607 UASM_i_MTC0(&p, a0, C0_PWBASE);
1608 else
1609 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
Dmitry Korotindd8f65a2019-06-24 19:05:27 +00001610 uasm_i_jr(&p, 31);
1611 uasm_i_ehb(&p);
David Daney3d8bfdd2010-12-21 14:19:11 -08001612 }
Jayachandran Cf4ae17a2013-09-25 16:28:04 +05301613#else
1614#ifdef CONFIG_SMP
1615 /* Save PGD to pgd_current[smp_processor_id()] */
1616 UASM_i_CPUID_MFC0(&p, a1, SMP_CPUID_REG);
1617 UASM_i_SRL_SAFE(&p, a1, a1, SMP_CPUID_PTRSHIFT);
1618 UASM_i_LA_mostly(&p, a2, pgdc);
1619 UASM_i_ADDU(&p, a2, a2, a1);
1620 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1621#else
1622 UASM_i_LA_mostly(&p, a2, pgdc);
1623 UASM_i_SW(&p, a0, uasm_rel_lo(pgdc), a2);
1624#endif /* SMP */
Jayachandran Cf4ae17a2013-09-25 16:28:04 +05301625
1626 /* if pgd_reg is allocated, save PGD also to scratch register */
Dmitry Korotindd8f65a2019-06-24 19:05:27 +00001627 if (pgd_reg != -1) {
Jayachandran Cf4ae17a2013-09-25 16:28:04 +05301628 UASM_i_MTC0(&p, a0, c0_kscratch(), pgd_reg);
Dmitry Korotindd8f65a2019-06-24 19:05:27 +00001629 uasm_i_jr(&p, 31);
1630 uasm_i_ehb(&p);
1631 } else {
1632 uasm_i_jr(&p, 31);
Jayachandran Cf4ae17a2013-09-25 16:28:04 +05301633 uasm_i_nop(&p);
Dmitry Korotindd8f65a2019-06-24 19:05:27 +00001634 }
Jayachandran Cf4ae17a2013-09-25 16:28:04 +05301635#endif
Jayachandran C6ba045f2013-06-23 17:16:19 +00001636 if (p >= tlbmiss_handler_setup_pgd_end)
1637 panic("tlbmiss_handler_setup_pgd space exceeded");
David Daney3d8bfdd2010-12-21 14:19:11 -08001638
Jayachandran C6ba045f2013-06-23 17:16:19 +00001639 uasm_resolve_relocs(relocs, labels);
1640 pr_debug("Wrote tlbmiss_handler_setup_pgd (%u instructions).\n",
1641 (unsigned int)(p - tlbmiss_handler_setup_pgd));
1642
1643 dump_handler("tlbmiss_handler", tlbmiss_handler_setup_pgd,
1644 tlbmiss_handler_setup_pgd_size);
David Daney3d8bfdd2010-12-21 14:19:11 -08001645}
Linus Torvalds1da177e2005-04-16 15:20:36 -07001646
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001647static void
David Daneybd1437e2009-05-08 15:10:50 -07001648iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001649{
1650#ifdef CONFIG_SMP
Ralf Baechle34adb282014-11-22 00:16:48 +01001651# ifdef CONFIG_PHYS_ADDR_T_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001652 if (cpu_has_64bits)
Thiemo Seufere30ec452008-01-28 20:05:38 +00001653 uasm_i_lld(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001654 else
1655# endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00001656 UASM_i_LL(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001657#else
Ralf Baechle34adb282014-11-22 00:16:48 +01001658# ifdef CONFIG_PHYS_ADDR_T_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001659 if (cpu_has_64bits)
Thiemo Seufere30ec452008-01-28 20:05:38 +00001660 uasm_i_ld(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001661 else
1662# endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00001663 UASM_i_LW(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001664#endif
1665}
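/*
 * In short, the PTE load selected above is (a linked load whenever
 * another CPU could race on the PTE):
 *
 *	SMP, 64-bit CPU with 64-bit PTEs: lld
 *	SMP, otherwise:                   ll
 *	UP,  64-bit CPU with 64-bit PTEs: ld
 *	UP,  otherwise:                   lw
 */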
1666
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001667static void
Thiemo Seufere30ec452008-01-28 20:05:38 +00001668iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
Paul Burtonbbeeffe2016-04-19 09:25:07 +01001669 unsigned int mode, unsigned int scratch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001670{
Thiemo Seufer63b2d2f2005-04-28 08:52:57 +00001671 unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
Paul Burtonb4ebbb82016-04-19 09:25:08 +01001672 unsigned int swmode = mode & ~hwmode;
Thiemo Seufer63b2d2f2005-04-28 08:52:57 +00001673
Masahiro Yamada97f26452016-08-03 13:45:50 -07001674 if (IS_ENABLED(CONFIG_XPA) && !cpu_has_64bits) {
Paul Burtonb4ebbb82016-04-19 09:25:08 +01001675 uasm_i_lui(p, scratch, swmode >> 16);
Steven J. Hillc5b36782015-02-26 18:16:38 -06001676 uasm_i_or(p, pte, pte, scratch);
Paul Burtonb4ebbb82016-04-19 09:25:08 +01001677 BUG_ON(swmode & 0xffff);
1678 } else {
1679 uasm_i_ori(p, pte, pte, mode);
1680 }
1681
Linus Torvalds1da177e2005-04-16 15:20:36 -07001682#ifdef CONFIG_SMP
Ralf Baechle34adb282014-11-22 00:16:48 +01001683# ifdef CONFIG_PHYS_ADDR_T_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001684 if (cpu_has_64bits)
Thiemo Seufere30ec452008-01-28 20:05:38 +00001685 uasm_i_scd(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001686 else
1687# endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00001688 UASM_i_SC(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001689
1690 if (r10000_llsc_war())
Thiemo Seufere30ec452008-01-28 20:05:38 +00001691 uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001692 else
Thiemo Seufere30ec452008-01-28 20:05:38 +00001693 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001694
Ralf Baechle34adb282014-11-22 00:16:48 +01001695# ifdef CONFIG_PHYS_ADDR_T_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001696 if (!cpu_has_64bits) {
Thiemo Seufere30ec452008-01-28 20:05:38 +00001697 /* no uasm_i_nop needed */
1698 uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
1699 uasm_i_ori(p, pte, pte, hwmode);
Paul Burtonb4ebbb82016-04-19 09:25:08 +01001700 BUG_ON(hwmode & ~0xffff);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001701 uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
1702 uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
1703 /* no uasm_i_nop needed */
1704 uasm_i_lw(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001705 } else
Thiemo Seufere30ec452008-01-28 20:05:38 +00001706 uasm_i_nop(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001707# else
Thiemo Seufere30ec452008-01-28 20:05:38 +00001708 uasm_i_nop(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001709# endif
1710#else
Ralf Baechle34adb282014-11-22 00:16:48 +01001711# ifdef CONFIG_PHYS_ADDR_T_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001712 if (cpu_has_64bits)
Thiemo Seufere30ec452008-01-28 20:05:38 +00001713 uasm_i_sd(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001714 else
1715# endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00001716 UASM_i_SW(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001717
Ralf Baechle34adb282014-11-22 00:16:48 +01001718# ifdef CONFIG_PHYS_ADDR_T_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001719 if (!cpu_has_64bits) {
Thiemo Seufere30ec452008-01-28 20:05:38 +00001720 uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
1721 uasm_i_ori(p, pte, pte, hwmode);
Paul Burtonb4ebbb82016-04-19 09:25:08 +01001722 BUG_ON(hwmode & ~0xffff);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001723 uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
1724 uasm_i_lw(p, pte, 0, ptr);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001725 }
1726# endif
1727#endif
1728}
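/*
 * For the common SMP case with word-sized PTEs, the sequence built
 * by iPTE_LW()/iPTE_SW() amounts to (registers symbolic):
 *
 *	ll    pte, 0(ptr)             # linked load of the PTE
 *	ori   pte, pte, mode          # set the requested bits
 *	sc    pte, 0(ptr)             # try to commit atomically
 *	beqz  pte, smp_pgtable_change # raced: reload and retry
 */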
1729
1730/*
1731 * Check if PTE is present, if not then jump to LABEL. PTR points to
1732 * the page table where this PTE is located, PTE will be re-loaded
1733	 * with its original value.
1734 */
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001735static void
David Daneybd1437e2009-05-08 15:10:50 -07001736build_pte_present(u32 **p, struct uasm_reloc **r,
David Daneybf286072011-07-05 16:34:46 -07001737 int pte, int ptr, int scratch, enum label_id lid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001738{
David Daneybf286072011-07-05 16:34:46 -07001739 int t = scratch >= 0 ? scratch : pte;
James Hogan8fe49082015-04-27 15:07:18 +01001740 int cur = pte;
David Daneybf286072011-07-05 16:34:46 -07001741
Steven J. Hill05857c62012-09-13 16:51:46 -05001742 if (cpu_has_rixi) {
David Daneycc33ae42010-12-20 15:54:50 -08001743 if (use_bbit_insns()) {
1744 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_PRESENT), lid);
1745 uasm_i_nop(p);
1746 } else {
James Hogan8fe49082015-04-27 15:07:18 +01001747 if (_PAGE_PRESENT_SHIFT) {
1748 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1749 cur = t;
1750 }
1751 uasm_i_andi(p, t, cur, 1);
David Daneybf286072011-07-05 16:34:46 -07001752 uasm_il_beqz(p, r, t, lid);
1753 if (pte == t)
1754				/* You lose the SMP race :-( */
1755 iPTE_LW(p, pte, ptr);
David Daneycc33ae42010-12-20 15:54:50 -08001756 }
David Daney6dd93442010-02-10 15:12:47 -08001757 } else {
James Hogan8fe49082015-04-27 15:07:18 +01001758 if (_PAGE_PRESENT_SHIFT) {
1759 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1760 cur = t;
1761 }
1762 uasm_i_andi(p, t, cur,
Paul Burton780602d2016-04-19 09:25:03 +01001763 (_PAGE_PRESENT | _PAGE_NO_READ) >> _PAGE_PRESENT_SHIFT);
1764 uasm_i_xori(p, t, t, _PAGE_PRESENT >> _PAGE_PRESENT_SHIFT);
David Daneybf286072011-07-05 16:34:46 -07001765 uasm_il_bnez(p, r, t, lid);
1766 if (pte == t)
1767			/* You lose the SMP race :-( */
1768 iPTE_LW(p, pte, ptr);
David Daney6dd93442010-02-10 15:12:47 -08001769 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001770}
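/*
 * On a non-RIXI CPU with _PAGE_PRESENT_SHIFT == 0, the test above
 * boils down to (registers symbolic):
 *
 *	andi  t, pte, _PAGE_PRESENT | _PAGE_NO_READ
 *	xori  t, t, _PAGE_PRESENT
 *	bnez  t, lid		# absent or no-read -> slow path
 */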
1771
1772/* Make PTE valid, store result in PTR. */
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001773static void
Thiemo Seufere30ec452008-01-28 20:05:38 +00001774build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
Paul Burtonbbeeffe2016-04-19 09:25:07 +01001775 unsigned int ptr, unsigned int scratch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001776{
Thiemo Seufer63b2d2f2005-04-28 08:52:57 +00001777 unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;
1778
Paul Burtonbbeeffe2016-04-19 09:25:07 +01001779 iPTE_SW(p, r, pte, ptr, mode, scratch);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001780}
1781
1782/*
1783 * Check if PTE can be written to, if not branch to LABEL. Regardless
1784 * restore PTE with value from PTR when done.
1785 */
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001786static void
David Daneybd1437e2009-05-08 15:10:50 -07001787build_pte_writable(u32 **p, struct uasm_reloc **r,
David Daneybf286072011-07-05 16:34:46 -07001788 unsigned int pte, unsigned int ptr, int scratch,
1789 enum label_id lid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001790{
David Daneybf286072011-07-05 16:34:46 -07001791 int t = scratch >= 0 ? scratch : pte;
James Hogan8fe49082015-04-27 15:07:18 +01001792 int cur = pte;
David Daneybf286072011-07-05 16:34:46 -07001793
James Hogan8fe49082015-04-27 15:07:18 +01001794 if (_PAGE_PRESENT_SHIFT) {
1795 uasm_i_srl(p, t, cur, _PAGE_PRESENT_SHIFT);
1796 cur = t;
1797 }
1798 uasm_i_andi(p, t, cur,
James Hogana3ae5652015-04-27 15:07:17 +01001799 (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
1800 uasm_i_xori(p, t, t,
1801 (_PAGE_PRESENT | _PAGE_WRITE) >> _PAGE_PRESENT_SHIFT);
David Daneybf286072011-07-05 16:34:46 -07001802 uasm_il_bnez(p, r, t, lid);
1803 if (pte == t)
1804		/* You lose the SMP race :-( */
David Daneycc33ae42010-12-20 15:54:50 -08001805 iPTE_LW(p, pte, ptr);
David Daneybf286072011-07-05 16:34:46 -07001806 else
1807 uasm_i_nop(p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001808}
1809
1810/* Make PTE writable, update software status bits as well, then store
1811 * at PTR.
1812 */
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001813static void
Thiemo Seufere30ec452008-01-28 20:05:38 +00001814build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
Paul Burtonbbeeffe2016-04-19 09:25:07 +01001815 unsigned int ptr, unsigned int scratch)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001816{
Thiemo Seufer63b2d2f2005-04-28 08:52:57 +00001817 unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
1818 | _PAGE_DIRTY);
1819
Paul Burtonbbeeffe2016-04-19 09:25:07 +01001820 iPTE_SW(p, r, pte, ptr, mode, scratch);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001821}
1822
1823/*
1824 * Check if PTE can be modified, if not branch to LABEL. Regardless
1825 * restore PTE with value from PTR when done.
1826 */
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001827static void
David Daneybd1437e2009-05-08 15:10:50 -07001828build_pte_modifiable(u32 **p, struct uasm_reloc **r,
David Daneybf286072011-07-05 16:34:46 -07001829 unsigned int pte, unsigned int ptr, int scratch,
1830 enum label_id lid)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001831{
David Daneycc33ae42010-12-20 15:54:50 -08001832 if (use_bbit_insns()) {
1833 uasm_il_bbit0(p, r, pte, ilog2(_PAGE_WRITE), lid);
1834 uasm_i_nop(p);
1835 } else {
David Daneybf286072011-07-05 16:34:46 -07001836 int t = scratch >= 0 ? scratch : pte;
Steven J. Hillc5b36782015-02-26 18:16:38 -06001837 uasm_i_srl(p, t, pte, _PAGE_WRITE_SHIFT);
1838 uasm_i_andi(p, t, t, 1);
David Daneybf286072011-07-05 16:34:46 -07001839 uasm_il_beqz(p, r, t, lid);
1840 if (pte == t)
1841 /* You lose the SMP race :-(*/
1842 iPTE_LW(p, pte, ptr);
David Daneycc33ae42010-12-20 15:54:50 -08001843 }
Linus Torvalds1da177e2005-04-16 15:20:36 -07001844}
1845
David Daney826222842009-10-14 12:16:56 -07001846#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
David Daney3d8bfdd2010-12-21 14:19:11 -08001847
1848
Linus Torvalds1da177e2005-04-16 15:20:36 -07001849/*
1850 * R3000 style TLB load/store/modify handlers.
1851 */
1852
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001853/*
1854 * This places the pte into ENTRYLO0 and writes it with tlbwi.
1855 * Then it returns.
1856 */
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001857static void
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001858build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001859{
Thiemo Seufere30ec452008-01-28 20:05:38 +00001860 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1861 uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
1862 uasm_i_tlbwi(p);
1863 uasm_i_jr(p, tmp);
1864 uasm_i_rfe(p); /* branch delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001865}
1866
1867/*
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001868 * This places the pte into ENTRYLO0 and writes it with tlbwi
1869 * or tlbwr as appropriate. This is because the index register
1870 * may have the probe fail bit set as a result of a trap on a
1871 * kseg2 access, i.e. without refill. Then it returns.
Linus Torvalds1da177e2005-04-16 15:20:36 -07001872 */
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001873static void
Thiemo Seufere30ec452008-01-28 20:05:38 +00001874build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
1875 struct uasm_reloc **r, unsigned int pte,
1876 unsigned int tmp)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001877{
Thiemo Seufere30ec452008-01-28 20:05:38 +00001878 uasm_i_mfc0(p, tmp, C0_INDEX);
1879 uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
1880 uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
1881 uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
1882 uasm_i_tlbwi(p); /* cp0 delay */
1883 uasm_i_jr(p, tmp);
1884 uasm_i_rfe(p); /* branch delay */
1885 uasm_l_r3000_write_probe_fail(l, *p);
1886 uasm_i_tlbwr(p); /* cp0 delay */
1887 uasm_i_jr(p, tmp);
1888 uasm_i_rfe(p); /* branch delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001889}
1890
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001891static void
Linus Torvalds1da177e2005-04-16 15:20:36 -07001892build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
1893 unsigned int ptr)
1894{
1895 long pgdc = (long)pgd_current;
1896
Thiemo Seufere30ec452008-01-28 20:05:38 +00001897 uasm_i_mfc0(p, pte, C0_BADVADDR);
1898 uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
1899 uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
1900 uasm_i_srl(p, pte, pte, 22); /* load delay */
1901 uasm_i_sll(p, pte, pte, 2);
1902 uasm_i_addu(p, ptr, ptr, pte);
1903 uasm_i_mfc0(p, pte, C0_CONTEXT);
1904 uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
1905 uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
1906 uasm_i_addu(p, ptr, ptr, pte);
1907 uasm_i_lw(p, pte, 0, ptr);
1908 uasm_i_tlbp(p); /* load delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001909}
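/*
 * Worked example of the walk above for a faulting address of
 * 0x00401234 (R3000: 4KB pages, 1024-entry tables, 4-byte entries):
 *
 *	badvaddr >> 22     = 1	pgd index (4MB per pgd entry)
 *	... << 2           = 4	byte offset into the pgd
 *	c0_context & 0xffc = 4	pte byte offset, (VPN & 0x3ff) << 2
 *
 * leaving the PTE loaded from slot 1 of pgd[1]'s page table.
 */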
1910
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001911static void build_r3000_tlb_load_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001912{
1913 u32 *p = handle_tlbl;
Jayachandran C6ba045f2013-06-23 17:16:19 +00001914 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
Thiemo Seufere30ec452008-01-28 20:05:38 +00001915 struct uasm_label *l = labels;
1916 struct uasm_reloc *r = relocs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001917
Jayachandran C6ba045f2013-06-23 17:16:19 +00001918 memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001919 memset(labels, 0, sizeof(labels));
1920 memset(relocs, 0, sizeof(relocs));
1921
1922 build_r3000_tlbchange_handler_head(&p, K0, K1);
David Daneybf286072011-07-05 16:34:46 -07001923 build_pte_present(&p, &r, K0, K1, -1, label_nopage_tlbl);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001924 uasm_i_nop(&p); /* load delay */
Paul Burtonbbeeffe2016-04-19 09:25:07 +01001925 build_make_valid(&p, &r, K0, K1, -1);
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001926 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001927
Thiemo Seufere30ec452008-01-28 20:05:38 +00001928 uasm_l_nopage_tlbl(&l, p);
1929 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
1930 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001931
Jayachandran C6ba045f2013-06-23 17:16:19 +00001932 if (p >= handle_tlbl_end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001933 panic("TLB load handler fastpath space exceeded");
1934
Thiemo Seufere30ec452008-01-28 20:05:38 +00001935 uasm_resolve_relocs(relocs, labels);
1936 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
1937 (unsigned int)(p - handle_tlbl));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001938
Jayachandran C6ba045f2013-06-23 17:16:19 +00001939 dump_handler("r3000_tlb_load", handle_tlbl, handle_tlbl_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001940}
1941
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001942static void build_r3000_tlb_store_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001943{
1944 u32 *p = handle_tlbs;
Jayachandran C6ba045f2013-06-23 17:16:19 +00001945 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
Thiemo Seufere30ec452008-01-28 20:05:38 +00001946 struct uasm_label *l = labels;
1947 struct uasm_reloc *r = relocs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001948
Jayachandran C6ba045f2013-06-23 17:16:19 +00001949 memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001950 memset(labels, 0, sizeof(labels));
1951 memset(relocs, 0, sizeof(relocs));
1952
1953 build_r3000_tlbchange_handler_head(&p, K0, K1);
David Daneybf286072011-07-05 16:34:46 -07001954 build_pte_writable(&p, &r, K0, K1, -1, label_nopage_tlbs);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001955 uasm_i_nop(&p); /* load delay */
Paul Burtonbbeeffe2016-04-19 09:25:07 +01001956 build_make_write(&p, &r, K0, K1, -1);
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001957 build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001958
Thiemo Seufere30ec452008-01-28 20:05:38 +00001959 uasm_l_nopage_tlbs(&l, p);
1960 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1961 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001962
Tony Wuafc813a2013-07-18 09:45:47 +00001963 if (p >= handle_tlbs_end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001964 panic("TLB store handler fastpath space exceeded");
1965
Thiemo Seufere30ec452008-01-28 20:05:38 +00001966 uasm_resolve_relocs(relocs, labels);
1967 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
1968 (unsigned int)(p - handle_tlbs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001969
Jayachandran C6ba045f2013-06-23 17:16:19 +00001970 dump_handler("r3000_tlb_store", handle_tlbs, handle_tlbs_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001971}
1972
Paul Gortmaker078a55f2013-06-18 13:38:59 +00001973static void build_r3000_tlb_modify_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001974{
1975 u32 *p = handle_tlbm;
Jayachandran C6ba045f2013-06-23 17:16:19 +00001976 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
Thiemo Seufere30ec452008-01-28 20:05:38 +00001977 struct uasm_label *l = labels;
1978 struct uasm_reloc *r = relocs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001979
Jayachandran C6ba045f2013-06-23 17:16:19 +00001980 memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001981 memset(labels, 0, sizeof(labels));
1982 memset(relocs, 0, sizeof(relocs));
1983
1984 build_r3000_tlbchange_handler_head(&p, K0, K1);
Ralf Baechled954ffe2011-08-02 22:52:48 +01001985 build_pte_modifiable(&p, &r, K0, K1, -1, label_nopage_tlbm);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001986 uasm_i_nop(&p); /* load delay */
Paul Burtonbbeeffe2016-04-19 09:25:07 +01001987 build_make_write(&p, &r, K0, K1, -1);
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001988 build_r3000_pte_reload_tlbwi(&p, K0, K1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001989
Thiemo Seufere30ec452008-01-28 20:05:38 +00001990 uasm_l_nopage_tlbm(&l, p);
1991 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1992 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001993
Jayachandran C6ba045f2013-06-23 17:16:19 +00001994 if (p >= handle_tlbm_end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001995 panic("TLB modify handler fastpath space exceeded");
1996
Thiemo Seufere30ec452008-01-28 20:05:38 +00001997 uasm_resolve_relocs(relocs, labels);
1998 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1999 (unsigned int)(p - handle_tlbm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002000
Jayachandran C6ba045f2013-06-23 17:16:19 +00002001 dump_handler("r3000_tlb_modify", handle_tlbm, handle_tlbm_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002002}
David Daney826222842009-10-14 12:16:56 -07002003#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002004
2005/*
2006 * R4000 style TLB load/store/modify handlers.
2007 */
Paul Gortmaker078a55f2013-06-18 13:38:59 +00002008static struct work_registers
Thiemo Seufere30ec452008-01-28 20:05:38 +00002009build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
David Daneybf286072011-07-05 16:34:46 -07002010 struct uasm_reloc **r)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002011{
David Daneybf286072011-07-05 16:34:46 -07002012 struct work_registers wr = build_get_work_registers(p);
2013
Ralf Baechle875d43e2005-09-03 15:56:16 -07002014#ifdef CONFIG_64BIT
David Daneybf286072011-07-05 16:34:46 -07002015 build_get_pmde64(p, l, r, wr.r1, wr.r2); /* get pmd in ptr */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002016#else
David Daneybf286072011-07-05 16:34:46 -07002017 build_get_pgde32(p, wr.r1, wr.r2); /* get pgd in ptr */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002018#endif
2019
David Daneyaa1762f2012-10-17 00:48:10 +02002020#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daneyfd062c82009-05-27 17:47:44 -07002021 /*
2022 * For huge tlb entries, pmd doesn't contain an address but
2023	 * instead contains the TLB pte. Check the _PAGE_HUGE bit and
2024 * see if we need to jump to huge tlb processing.
2025 */
David Daneybf286072011-07-05 16:34:46 -07002026 build_is_huge_pte(p, r, wr.r1, wr.r2, label_tlb_huge_update);
David Daneyfd062c82009-05-27 17:47:44 -07002027#endif
2028
David Daneybf286072011-07-05 16:34:46 -07002029 UASM_i_MFC0(p, wr.r1, C0_BADVADDR);
2030 UASM_i_LW(p, wr.r2, 0, wr.r2);
2031 UASM_i_SRL(p, wr.r1, wr.r1, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
2032 uasm_i_andi(p, wr.r1, wr.r1, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
2033 UASM_i_ADDU(p, wr.r2, wr.r2, wr.r1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002034
2035#ifdef CONFIG_SMP
Thiemo Seufere30ec452008-01-28 20:05:38 +00002036 uasm_l_smp_pgtable_change(l, *p);
2037#endif
David Daneybf286072011-07-05 16:34:46 -07002038 iPTE_LW(p, wr.r1, wr.r2); /* get even pte */
Leonid Yegoshin070e76c2014-11-27 11:13:08 +00002039 if (!m4kc_tlbp_war()) {
Maciej W. Rozycki8df5bea2006-08-23 14:26:50 +01002040 build_tlb_probe_entry(p);
Leonid Yegoshin070e76c2014-11-27 11:13:08 +00002041 if (cpu_has_htw) {
2042			/* the HTW may have replaced the entry: if the probe missed, leave and retry */
2043 uasm_i_ehb(p);
2044 uasm_i_mfc0(p, wr.r3, C0_INDEX);
2045 uasm_il_bltz(p, r, wr.r3, label_leave);
2046 uasm_i_nop(p);
2047 }
2048 }
David Daneybf286072011-07-05 16:34:46 -07002049 return wr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002050}
2051
Paul Gortmaker078a55f2013-06-18 13:38:59 +00002052static void
Thiemo Seufere30ec452008-01-28 20:05:38 +00002053build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
2054 struct uasm_reloc **r, unsigned int tmp,
Linus Torvalds1da177e2005-04-16 15:20:36 -07002055 unsigned int ptr)
2056{
Thiemo Seufere30ec452008-01-28 20:05:38 +00002057 uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
2058 uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002059 build_update_entries(p, tmp, ptr);
2060 build_tlb_write_entry(p, l, r, tlb_indexed);
Thiemo Seufere30ec452008-01-28 20:05:38 +00002061 uasm_l_leave(l, *p);
David Daneybf286072011-07-05 16:34:46 -07002062 build_restore_work_registers(p);
Thiemo Seufere30ec452008-01-28 20:05:38 +00002063 uasm_i_eret(p); /* return from trap */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002064
Ralf Baechle875d43e2005-09-03 15:56:16 -07002065#ifdef CONFIG_64BIT
David Daney1ec56322010-04-28 12:16:18 -07002066 build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002067#endif
2068}
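/*
 * The ori/xori pair at the top of the tail rounds ptr down to the
 * even PTE of an even/odd pair without needing a scratch register,
 * e.g. with sizeof(pte_t) == 8:
 *
 *	ptr == ...0x38: ori -> ...0x38, xori -> ...0x30
 *	ptr == ...0x30: ori -> ...0x38, xori -> ...0x30
 */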
2069
Paul Gortmaker078a55f2013-06-18 13:38:59 +00002070static void build_r4000_tlb_load_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002071{
2072 u32 *p = handle_tlbl;
Jayachandran C6ba045f2013-06-23 17:16:19 +00002073 const int handle_tlbl_size = handle_tlbl_end - handle_tlbl;
Thiemo Seufere30ec452008-01-28 20:05:38 +00002074 struct uasm_label *l = labels;
2075 struct uasm_reloc *r = relocs;
David Daneybf286072011-07-05 16:34:46 -07002076 struct work_registers wr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002077
Jayachandran C6ba045f2013-06-23 17:16:19 +00002078 memset(handle_tlbl, 0, handle_tlbl_size * sizeof(handle_tlbl[0]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002079 memset(labels, 0, sizeof(labels));
2080 memset(relocs, 0, sizeof(relocs));
2081
2082 if (bcm1250_m3_war()) {
Ralf Baechle3d452852010-03-23 17:56:38 +01002083 unsigned int segbits = 44;
2084
2085 uasm_i_dmfc0(&p, K0, C0_BADVADDR);
2086 uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
Thiemo Seufere30ec452008-01-28 20:05:38 +00002087 uasm_i_xor(&p, K0, K0, K1);
David Daney3be60222010-04-28 12:16:17 -07002088 uasm_i_dsrl_safe(&p, K1, K0, 62);
2089 uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
2090 uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
Ralf Baechle3d452852010-03-23 17:56:38 +01002091 uasm_i_or(&p, K0, K0, K1);
Thiemo Seufere30ec452008-01-28 20:05:38 +00002092 uasm_il_bnez(&p, &r, K0, label_leave);
2093 /* No need for uasm_i_nop */
Linus Torvalds1da177e2005-04-16 15:20:36 -07002094 }
2095
David Daneybf286072011-07-05 16:34:46 -07002096 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2097 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
Maciej W. Rozycki8df5bea2006-08-23 14:26:50 +01002098 if (m4kc_tlbp_war())
2099 build_tlb_probe_entry(&p);
David Daney6dd93442010-02-10 15:12:47 -08002100
Leonid Yegoshin5890f702014-07-15 14:09:56 +01002101 if (cpu_has_rixi && !cpu_has_rixiex) {
David Daney6dd93442010-02-10 15:12:47 -08002102 /*
2103 * If the page is not _PAGE_VALID, RI or XI could not
2104		 * have triggered it. Skip the expensive test.
2105 */
David Daneycc33ae42010-12-20 15:54:50 -08002106 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07002107 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
David Daneycc33ae42010-12-20 15:54:50 -08002108 label_tlbl_goaround1);
2109 } else {
David Daneybf286072011-07-05 16:34:46 -07002110 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2111 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround1);
David Daneycc33ae42010-12-20 15:54:50 -08002112 }
David Daney6dd93442010-02-10 15:12:47 -08002113 uasm_i_nop(&p);
2114
2115 uasm_i_tlbr(&p);
Ralf Baechle73acc7d2013-06-20 14:56:17 +02002116
2117 switch (current_cpu_type()) {
2118 default:
Leonid Yegoshin77f3ee52014-11-24 15:42:46 +00002119 if (cpu_has_mips_r2_exec_hazard) {
Ralf Baechle73acc7d2013-06-20 14:56:17 +02002120 uasm_i_ehb(&p);
2121
2122 case CPU_CAVIUM_OCTEON:
2123 case CPU_CAVIUM_OCTEON_PLUS:
2124 case CPU_CAVIUM_OCTEON2:
2125 break;
2126 }
2127 }
2128
David Daney6dd93442010-02-10 15:12:47 -08002129 /* Examine entrylo 0 or 1 based on ptr. */
David Daneycc33ae42010-12-20 15:54:50 -08002130 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07002131 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
David Daneycc33ae42010-12-20 15:54:50 -08002132 } else {
David Daneybf286072011-07-05 16:34:46 -07002133 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2134 uasm_i_beqz(&p, wr.r3, 8);
David Daneycc33ae42010-12-20 15:54:50 -08002135 }
David Daneybf286072011-07-05 16:34:46 -07002136		/* load it in the delay slot */
2137 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2138 /* load it if ptr is odd */
2139 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
David Daney6dd93442010-02-10 15:12:47 -08002140 /*
David Daneybf286072011-07-05 16:34:46 -07002141 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
David Daney6dd93442010-02-10 15:12:47 -08002142 * XI must have triggered it.
2143 */
David Daneycc33ae42010-12-20 15:54:50 -08002144 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07002145 uasm_il_bbit1(&p, &r, wr.r3, 1, label_nopage_tlbl);
2146 uasm_i_nop(&p);
David Daneycc33ae42010-12-20 15:54:50 -08002147 uasm_l_tlbl_goaround1(&l, p);
2148 } else {
David Daneybf286072011-07-05 16:34:46 -07002149 uasm_i_andi(&p, wr.r3, wr.r3, 2);
2150 uasm_il_bnez(&p, &r, wr.r3, label_nopage_tlbl);
2151 uasm_i_nop(&p);
David Daneycc33ae42010-12-20 15:54:50 -08002152 }
David Daneybf286072011-07-05 16:34:46 -07002153 uasm_l_tlbl_goaround1(&l, p);
David Daney6dd93442010-02-10 15:12:47 -08002154 }
Paul Burtonbbeeffe2016-04-19 09:25:07 +01002155 build_make_valid(&p, &r, wr.r1, wr.r2, wr.r3);
David Daneybf286072011-07-05 16:34:46 -07002156 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002157
David Daneyaa1762f2012-10-17 00:48:10 +02002158#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daneyfd062c82009-05-27 17:47:44 -07002159 /*
2160 * This is the entry point when build_r4000_tlbchange_handler_head
2161 * spots a huge page.
2162 */
2163 uasm_l_tlb_huge_update(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002164 iPTE_LW(&p, wr.r1, wr.r2);
2165 build_pte_present(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbl);
David Daneyfd062c82009-05-27 17:47:44 -07002166 build_tlb_probe_entry(&p);
David Daney6dd93442010-02-10 15:12:47 -08002167
Leonid Yegoshin5890f702014-07-15 14:09:56 +01002168 if (cpu_has_rixi && !cpu_has_rixiex) {
David Daney6dd93442010-02-10 15:12:47 -08002169 /*
2170 * If the page is not _PAGE_VALID, RI or XI could not
2171		 * have triggered it. Skip the expensive test.
2172 */
David Daneycc33ae42010-12-20 15:54:50 -08002173 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07002174 uasm_il_bbit0(&p, &r, wr.r1, ilog2(_PAGE_VALID),
David Daneycc33ae42010-12-20 15:54:50 -08002175 label_tlbl_goaround2);
2176 } else {
David Daneybf286072011-07-05 16:34:46 -07002177 uasm_i_andi(&p, wr.r3, wr.r1, _PAGE_VALID);
2178 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
David Daneycc33ae42010-12-20 15:54:50 -08002179 }
David Daney6dd93442010-02-10 15:12:47 -08002180 uasm_i_nop(&p);
2181
2182 uasm_i_tlbr(&p);
Ralf Baechle73acc7d2013-06-20 14:56:17 +02002183
2184 switch (current_cpu_type()) {
2185 default:
Leonid Yegoshin77f3ee52014-11-24 15:42:46 +00002186 if (cpu_has_mips_r2_exec_hazard) {
Ralf Baechle73acc7d2013-06-20 14:56:17 +02002187 uasm_i_ehb(&p);
2188
2189 case CPU_CAVIUM_OCTEON:
2190 case CPU_CAVIUM_OCTEON_PLUS:
2191 case CPU_CAVIUM_OCTEON2:
2192 break;
2193 }
2194 }
2195
David Daney6dd93442010-02-10 15:12:47 -08002196 /* Examine entrylo 0 or 1 based on ptr. */
David Daneycc33ae42010-12-20 15:54:50 -08002197 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07002198 uasm_i_bbit0(&p, wr.r2, ilog2(sizeof(pte_t)), 8);
David Daneycc33ae42010-12-20 15:54:50 -08002199 } else {
David Daneybf286072011-07-05 16:34:46 -07002200 uasm_i_andi(&p, wr.r3, wr.r2, sizeof(pte_t));
2201 uasm_i_beqz(&p, wr.r3, 8);
David Daneycc33ae42010-12-20 15:54:50 -08002202 }
David Daneybf286072011-07-05 16:34:46 -07002203		/* load it in the delay slot */
2204 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO0);
2205 /* load it if ptr is odd */
2206 UASM_i_MFC0(&p, wr.r3, C0_ENTRYLO1);
David Daney6dd93442010-02-10 15:12:47 -08002207 /*
David Daneybf286072011-07-05 16:34:46 -07002208 * If the entryLo (now in wr.r3) is valid (bit 1), RI or
David Daney6dd93442010-02-10 15:12:47 -08002209 * XI must have triggered it.
2210 */
David Daneycc33ae42010-12-20 15:54:50 -08002211 if (use_bbit_insns()) {
David Daneybf286072011-07-05 16:34:46 -07002212 uasm_il_bbit0(&p, &r, wr.r3, 1, label_tlbl_goaround2);
David Daneycc33ae42010-12-20 15:54:50 -08002213 } else {
David Daneybf286072011-07-05 16:34:46 -07002214 uasm_i_andi(&p, wr.r3, wr.r3, 2);
2215 uasm_il_beqz(&p, &r, wr.r3, label_tlbl_goaround2);
David Daneycc33ae42010-12-20 15:54:50 -08002216 }
David Daney0f4ccbc2011-09-16 18:06:02 -07002217 if (PM_DEFAULT_MASK == 0)
2218 uasm_i_nop(&p);
David Daney6dd93442010-02-10 15:12:47 -08002219 /*
2220 * We clobbered C0_PAGEMASK, restore it. On the other branch
2221 * it is restored in build_huge_tlb_write_entry.
2222 */
David Daneybf286072011-07-05 16:34:46 -07002223 build_restore_pagemask(&p, &r, wr.r3, label_nopage_tlbl, 0);
David Daney6dd93442010-02-10 15:12:47 -08002224
2225 uasm_l_tlbl_goaround2(&l, p);
2226 }
David Daneybf286072011-07-05 16:34:46 -07002227 uasm_i_ori(&p, wr.r1, wr.r1, (_PAGE_ACCESSED | _PAGE_VALID));
Huacai Chen59b87252017-03-16 21:00:27 +08002228 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
David Daneyfd062c82009-05-27 17:47:44 -07002229#endif
2230
Thiemo Seufere30ec452008-01-28 20:05:38 +00002231 uasm_l_nopage_tlbl(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002232 build_restore_work_registers(&p);
Steven J. Hill2a0b24f2013-03-25 12:15:55 -05002233#ifdef CONFIG_CPU_MICROMIPS
2234 if ((unsigned long)tlb_do_page_fault_0 & 1) {
2235 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_0));
2236 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_0));
2237 uasm_i_jr(&p, K0);
2238 } else
2239#endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00002240 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
2241 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002242
Jayachandran C6ba045f2013-06-23 17:16:19 +00002243 if (p >= handle_tlbl_end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002244 panic("TLB load handler fastpath space exceeded");
2245
Thiemo Seufere30ec452008-01-28 20:05:38 +00002246 uasm_resolve_relocs(relocs, labels);
2247 pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
2248 (unsigned int)(p - handle_tlbl));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002249
Jayachandran C6ba045f2013-06-23 17:16:19 +00002250 dump_handler("r4000_tlb_load", handle_tlbl, handle_tlbl_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002251}
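/*
 * A note on the "examine entrylo" sequences above: the branch offset
 * of 8 skips exactly one instruction past the delay slot, so for
 * example
 *
 *	bbit0 ptr, ilog2(sizeof(pte_t)), 8
 *	mfc0  r3, c0_entrylo0	# always runs (delay slot)
 *	mfc0  r3, c0_entrylo1	# skipped for an even PTE
 *
 * leaves r3 holding whichever EntryLo half corresponds to the
 * faulting page.
 */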
2252
Paul Gortmaker078a55f2013-06-18 13:38:59 +00002253static void build_r4000_tlb_store_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002254{
2255 u32 *p = handle_tlbs;
Jayachandran C6ba045f2013-06-23 17:16:19 +00002256 const int handle_tlbs_size = handle_tlbs_end - handle_tlbs;
Thiemo Seufere30ec452008-01-28 20:05:38 +00002257 struct uasm_label *l = labels;
2258 struct uasm_reloc *r = relocs;
David Daneybf286072011-07-05 16:34:46 -07002259 struct work_registers wr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002260
Jayachandran C6ba045f2013-06-23 17:16:19 +00002261 memset(handle_tlbs, 0, handle_tlbs_size * sizeof(handle_tlbs[0]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002262 memset(labels, 0, sizeof(labels));
2263 memset(relocs, 0, sizeof(relocs));
2264
David Daneybf286072011-07-05 16:34:46 -07002265 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2266 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
Maciej W. Rozycki8df5bea2006-08-23 14:26:50 +01002267 if (m4kc_tlbp_war())
2268 build_tlb_probe_entry(&p);
Paul Burtonbbeeffe2016-04-19 09:25:07 +01002269 build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
David Daneybf286072011-07-05 16:34:46 -07002270 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002271
David Daneyaa1762f2012-10-17 00:48:10 +02002272#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daneyfd062c82009-05-27 17:47:44 -07002273 /*
2274 * This is the entry point when
2275 * build_r4000_tlbchange_handler_head spots a huge page.
2276 */
2277 uasm_l_tlb_huge_update(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002278 iPTE_LW(&p, wr.r1, wr.r2);
2279 build_pte_writable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbs);
David Daneyfd062c82009-05-27 17:47:44 -07002280 build_tlb_probe_entry(&p);
David Daneybf286072011-07-05 16:34:46 -07002281 uasm_i_ori(&p, wr.r1, wr.r1,
David Daneyfd062c82009-05-27 17:47:44 -07002282 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
Huacai Chen59b87252017-03-16 21:00:27 +08002283 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 1);
David Daneyfd062c82009-05-27 17:47:44 -07002284#endif
2285
Thiemo Seufere30ec452008-01-28 20:05:38 +00002286 uasm_l_nopage_tlbs(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002287 build_restore_work_registers(&p);
Steven J. Hill2a0b24f2013-03-25 12:15:55 -05002288#ifdef CONFIG_CPU_MICROMIPS
2289 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2290 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2291 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2292 uasm_i_jr(&p, K0);
2293 } else
2294#endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00002295 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2296 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002297
Jayachandran C6ba045f2013-06-23 17:16:19 +00002298 if (p >= handle_tlbs_end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002299 panic("TLB store handler fastpath space exceeded");
2300
Thiemo Seufere30ec452008-01-28 20:05:38 +00002301 uasm_resolve_relocs(relocs, labels);
2302 pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
2303 (unsigned int)(p - handle_tlbs));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002304
Jayachandran C6ba045f2013-06-23 17:16:19 +00002305 dump_handler("r4000_tlb_store", handle_tlbs, handle_tlbs_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002306}
2307
Paul Gortmaker078a55f2013-06-18 13:38:59 +00002308static void build_r4000_tlb_modify_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002309{
2310 u32 *p = handle_tlbm;
Jayachandran C6ba045f2013-06-23 17:16:19 +00002311 const int handle_tlbm_size = handle_tlbm_end - handle_tlbm;
Thiemo Seufere30ec452008-01-28 20:05:38 +00002312 struct uasm_label *l = labels;
2313 struct uasm_reloc *r = relocs;
David Daneybf286072011-07-05 16:34:46 -07002314 struct work_registers wr;
Linus Torvalds1da177e2005-04-16 15:20:36 -07002315
Jayachandran C6ba045f2013-06-23 17:16:19 +00002316 memset(handle_tlbm, 0, handle_tlbm_size * sizeof(handle_tlbm[0]));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002317 memset(labels, 0, sizeof(labels));
2318 memset(relocs, 0, sizeof(relocs));
2319
David Daneybf286072011-07-05 16:34:46 -07002320 wr = build_r4000_tlbchange_handler_head(&p, &l, &r);
2321 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
Maciej W. Rozycki8df5bea2006-08-23 14:26:50 +01002322 if (m4kc_tlbp_war())
2323 build_tlb_probe_entry(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002324 /* Present and writable bits set, set accessed and dirty bits. */
Paul Burtonbbeeffe2016-04-19 09:25:07 +01002325 build_make_write(&p, &r, wr.r1, wr.r2, wr.r3);
David Daneybf286072011-07-05 16:34:46 -07002326 build_r4000_tlbchange_handler_tail(&p, &l, &r, wr.r1, wr.r2);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002327
David Daneyaa1762f2012-10-17 00:48:10 +02002328#ifdef CONFIG_MIPS_HUGE_TLB_SUPPORT
David Daneyfd062c82009-05-27 17:47:44 -07002329 /*
2330 * This is the entry point when
2331 * build_r4000_tlbchange_handler_head spots a huge page.
2332 */
2333 uasm_l_tlb_huge_update(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002334 iPTE_LW(&p, wr.r1, wr.r2);
2335 build_pte_modifiable(&p, &r, wr.r1, wr.r2, wr.r3, label_nopage_tlbm);
David Daneyfd062c82009-05-27 17:47:44 -07002336 build_tlb_probe_entry(&p);
David Daneybf286072011-07-05 16:34:46 -07002337 uasm_i_ori(&p, wr.r1, wr.r1,
David Daneyfd062c82009-05-27 17:47:44 -07002338 _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
Huacai Chen59b87252017-03-16 21:00:27 +08002339 build_huge_handler_tail(&p, &r, &l, wr.r1, wr.r2, 0);
David Daneyfd062c82009-05-27 17:47:44 -07002340#endif
2341
Thiemo Seufere30ec452008-01-28 20:05:38 +00002342 uasm_l_nopage_tlbm(&l, p);
David Daneybf286072011-07-05 16:34:46 -07002343 build_restore_work_registers(&p);
Steven J. Hill2a0b24f2013-03-25 12:15:55 -05002344#ifdef CONFIG_CPU_MICROMIPS
2345 if ((unsigned long)tlb_do_page_fault_1 & 1) {
2346 uasm_i_lui(&p, K0, uasm_rel_hi((long)tlb_do_page_fault_1));
2347 uasm_i_addiu(&p, K0, K0, uasm_rel_lo((long)tlb_do_page_fault_1));
2348 uasm_i_jr(&p, K0);
2349 } else
2350#endif
Thiemo Seufere30ec452008-01-28 20:05:38 +00002351 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
2352 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002353
Jayachandran C6ba045f2013-06-23 17:16:19 +00002354 if (p >= handle_tlbm_end)
Linus Torvalds1da177e2005-04-16 15:20:36 -07002355 panic("TLB modify handler fastpath space exceeded");
2356
Thiemo Seufere30ec452008-01-28 20:05:38 +00002357 uasm_resolve_relocs(relocs, labels);
2358 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
2359 (unsigned int)(p - handle_tlbm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07002360
Jayachandran C6ba045f2013-06-23 17:16:19 +00002361 dump_handler("r4000_tlb_modify", handle_tlbm, handle_tlbm_size);
Linus Torvalds1da177e2005-04-16 15:20:36 -07002362}
2363
Paul Gortmaker078a55f2013-06-18 13:38:59 +00002364static void flush_tlb_handlers(void)
Jonas Gorskia3d90862013-06-21 17:48:48 +00002365{
2366 local_flush_icache_range((unsigned long)handle_tlbl,
Ralf Baechle6ac53102013-07-02 17:19:04 +02002367 (unsigned long)handle_tlbl_end);
Jonas Gorskia3d90862013-06-21 17:48:48 +00002368 local_flush_icache_range((unsigned long)handle_tlbs,
Ralf Baechle6ac53102013-07-02 17:19:04 +02002369 (unsigned long)handle_tlbs_end);
Jonas Gorskia3d90862013-06-21 17:48:48 +00002370 local_flush_icache_range((unsigned long)handle_tlbm,
Ralf Baechle6ac53102013-07-02 17:19:04 +02002371 (unsigned long)handle_tlbm_end);
Ralf Baechle6ac53102013-07-02 17:19:04 +02002372 local_flush_icache_range((unsigned long)tlbmiss_handler_setup_pgd,
2373 (unsigned long)tlbmiss_handler_setup_pgd_end);
Jonas Gorskia3d90862013-06-21 17:48:48 +00002374}
2375
static void print_htw_config(void)
{
	unsigned long config;
	unsigned int pwctl;
	const int field = 2 * sizeof(unsigned long);

	config = read_c0_pwfield();
	pr_debug("PWField (0x%0*lx): GDI: 0x%02lx UDI: 0x%02lx MDI: 0x%02lx PTI: 0x%02lx PTEI: 0x%02lx\n",
		 field, config,
		 (config & MIPS_PWFIELD_GDI_MASK) >> MIPS_PWFIELD_GDI_SHIFT,
		 (config & MIPS_PWFIELD_UDI_MASK) >> MIPS_PWFIELD_UDI_SHIFT,
		 (config & MIPS_PWFIELD_MDI_MASK) >> MIPS_PWFIELD_MDI_SHIFT,
		 (config & MIPS_PWFIELD_PTI_MASK) >> MIPS_PWFIELD_PTI_SHIFT,
		 (config & MIPS_PWFIELD_PTEI_MASK) >> MIPS_PWFIELD_PTEI_SHIFT);

	config = read_c0_pwsize();
	pr_debug("PWSize (0x%0*lx): PS: 0x%lx GDW: 0x%02lx UDW: 0x%02lx MDW: 0x%02lx PTW: 0x%02lx PTEW: 0x%02lx\n",
		 field, config,
		 (config & MIPS_PWSIZE_PS_MASK) >> MIPS_PWSIZE_PS_SHIFT,
		 (config & MIPS_PWSIZE_GDW_MASK) >> MIPS_PWSIZE_GDW_SHIFT,
		 (config & MIPS_PWSIZE_UDW_MASK) >> MIPS_PWSIZE_UDW_SHIFT,
		 (config & MIPS_PWSIZE_MDW_MASK) >> MIPS_PWSIZE_MDW_SHIFT,
		 (config & MIPS_PWSIZE_PTW_MASK) >> MIPS_PWSIZE_PTW_SHIFT,
		 (config & MIPS_PWSIZE_PTEW_MASK) >> MIPS_PWSIZE_PTEW_SHIFT);

	pwctl = read_c0_pwctl();
	pr_debug("PWCtl (0x%x): PWEn: 0x%x XK: 0x%x XS: 0x%x XU: 0x%x DPH: 0x%x HugePg: 0x%x Psn: 0x%x\n",
		 pwctl,
		 (pwctl & MIPS_PWCTL_PWEN_MASK) >> MIPS_PWCTL_PWEN_SHIFT,
		 (pwctl & MIPS_PWCTL_XK_MASK) >> MIPS_PWCTL_XK_SHIFT,
		 (pwctl & MIPS_PWCTL_XS_MASK) >> MIPS_PWCTL_XS_SHIFT,
		 (pwctl & MIPS_PWCTL_XU_MASK) >> MIPS_PWCTL_XU_SHIFT,
		 (pwctl & MIPS_PWCTL_DPH_MASK) >> MIPS_PWCTL_DPH_SHIFT,
		 (pwctl & MIPS_PWCTL_HUGEPG_MASK) >> MIPS_PWCTL_HUGEPG_SHIFT,
		 (pwctl & MIPS_PWCTL_PSN_MASK) >> MIPS_PWCTL_PSN_SHIFT);
}

static void config_htw_params(void)
{
	unsigned long pwfield, pwsize, ptei;
	unsigned int config;

	/*
	 * We are using 2-level page tables, so we only need to set up
	 * GDW and PTW appropriately. UDW and MDW will remain 0. The
	 * default value of GDI/UDI/MDI/PTI is 0xc; writing a value
	 * below 0xc to any of these fields causes the entire write to
	 * be dropped, so we must preserve the original reset values
	 * and overwrite only what we really want.
	 */

	pwfield = read_c0_pwfield();
	/* re-initialize the GDI field */
	pwfield &= ~MIPS_PWFIELD_GDI_MASK;
	pwfield |= PGDIR_SHIFT << MIPS_PWFIELD_GDI_SHIFT;
	/* re-initialize the PTI field including the even/odd bit */
	pwfield &= ~MIPS_PWFIELD_PTI_MASK;
	pwfield |= PAGE_SHIFT << MIPS_PWFIELD_PTI_SHIFT;
	if (CONFIG_PGTABLE_LEVELS >= 3) {
		pwfield &= ~MIPS_PWFIELD_MDI_MASK;
		pwfield |= PMD_SHIFT << MIPS_PWFIELD_MDI_SHIFT;
	}
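	/*
	 * Illustrative values (the real ones depend on the kernel
	 * configuration): with 4 KiB pages and three page table levels,
	 * PAGE_SHIFT is 12, PMD_SHIFT 21 and PGDIR_SHIFT 30, so
	 * PTI/MDI/GDI are written as 12/21/30, all at or above the 0xc
	 * minimum.
	 */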
	/* Set the PTEI right shift */
	ptei = _PAGE_GLOBAL_SHIFT << MIPS_PWFIELD_PTEI_SHIFT;
	pwfield |= ptei;
	write_c0_pwfield(pwfield);
	/* Check whether the PTEI value is supported */
	back_to_back_c0_hazard();
	pwfield = read_c0_pwfield();
	if (((pwfield & MIPS_PWFIELD_PTEI_MASK) << MIPS_PWFIELD_PTEI_SHIFT)
	    != ptei) {
		pr_warn("Unsupported PTEI field value: 0x%lx. HTW will not be enabled\n",
			ptei);
		/*
		 * Drop the option to avoid HTW being enabled via another
		 * path (e.g. htw_reset())
		 */
		current_cpu_data.options &= ~MIPS_CPU_HTW;
		return;
	}

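	/*
	 * The *W fields hold index widths in bits. For example, assuming
	 * 4 KiB pages and 8-byte PTEs, PTRS_PER_PTE is 512 and PTW is
	 * written as ilog2(512) = 9.
	 */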
	pwsize = ilog2(PTRS_PER_PGD) << MIPS_PWSIZE_GDW_SHIFT;
	pwsize |= ilog2(PTRS_PER_PTE) << MIPS_PWSIZE_PTW_SHIFT;
	if (CONFIG_PGTABLE_LEVELS >= 3)
		pwsize |= ilog2(PTRS_PER_PMD) << MIPS_PWSIZE_MDW_SHIFT;

	/* Set pointer size to size of directory pointers */
	if (IS_ENABLED(CONFIG_64BIT))
		pwsize |= MIPS_PWSIZE_PS_MASK;
	/* PTEs may be multiple pointers long (e.g. with XPA) */
	pwsize |= ((PTE_T_LOG2 - PGD_T_LOG2) << MIPS_PWSIZE_PTEW_SHIFT)
		  & MIPS_PWSIZE_PTEW_MASK;

	write_c0_pwsize(pwsize);

	/* Make sure everything is set before we enable the HTW */
	back_to_back_c0_hazard();

	/*
	 * Enable HTW (and only for XUSeg on 64-bit), and disable the rest of
	 * the pwctl fields.
	 */
	config = 1 << MIPS_PWCTL_PWEN_SHIFT;
	if (IS_ENABLED(CONFIG_64BIT))
		config |= MIPS_PWCTL_XU_MASK;
	write_c0_pwctl(config);
	pr_info("Hardware Page Table Walker enabled\n");

	print_htw_config();
}

static void config_xpa_params(void)
{
#ifdef CONFIG_XPA
	unsigned int pagegrain;

	if (mips_xpa_disabled) {
		pr_info("Extended Physical Addressing (XPA) disabled\n");
		return;
	}

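	/*
	 * Try to set the large-physical-address enable (ELPA) bit and
	 * read it back; on a core without XPA support the bit does not
	 * stick.
	 */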
	pagegrain = read_c0_pagegrain();
	write_c0_pagegrain(pagegrain | PG_ELPA);
	back_to_back_c0_hazard();
	pagegrain = read_c0_pagegrain();

	if (pagegrain & PG_ELPA)
		pr_info("Extended Physical Addressing (XPA) enabled\n");
	else
		panic("Extended Physical Addressing (XPA) disabled");
#endif
}

static void check_pabits(void)
{
	unsigned long entry;
	unsigned pabits, fillbits;

	if (!cpu_has_rixi || !_PAGE_NO_EXEC) {
		/*
		 * We'll only be making use of the fact that we can rotate
		 * bits into the fill if the CPU supports RIXI, so don't
		 * bother probing this for CPUs which don't.
		 */
		return;
	}

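	/*
	 * Write all ones to EntryLo0 and read the value back; only the
	 * implemented bits stick, which tells us how wide the PFN field
	 * (and hence the fill) really is.
	 */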
	write_c0_entrylo0(~0ul);
	back_to_back_c0_hazard();
	entry = read_c0_entrylo0();

	/* clear all non-PFN bits */
	entry &= ~((1 << MIPS_ENTRYLO_PFN_SHIFT) - 1);
	entry &= ~(MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI);

	/* find a lower bound on PABITS, and upper bound on fill bits */
	pabits = fls_long(entry) + 6;
	fillbits = max_t(int, (int)BITS_PER_LONG - pabits, 0);
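	/*
	 * Worked example: the EntryLo PFN field starts at bit 6 and PFN
	 * bit 0 is physical address bit 12, hence the +6. A CPU with 36
	 * physical address bits can set EntryLo bits 6..29, fls_long()
	 * returns 30, and pabits becomes 36, leaving 64 - 36 = 28 fill
	 * bits on a 64-bit kernel.
	 */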

	/* minus the RI & XI bits */
	fillbits -= min_t(unsigned, fillbits, 2);

	if (fillbits >= ilog2(_PAGE_NO_EXEC))
		fill_includes_sw_bits = true;

	pr_debug("Entry* registers contain %u fill bits\n", fillbits);
}

void build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

	if (IS_ENABLED(CONFIG_XPA) && !cpu_has_rixi)
		panic("Kernels supporting XPA currently require CPUs with RIXI");

	output_pgtable_bits_defines();
	check_pabits();

#ifdef CONFIG_64BIT
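	/*
	 * A single PGD maps PGDIR_SHIFT + log2(PTRS_PER_PGD) bits of
	 * virtual address; with 8-byte entries that is PGDIR_SHIFT +
	 * PGD_ORDER + PAGE_SHIFT - 3. Accesses above that need the
	 * extra high-segbits checks in the refill handler.
	 */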
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
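		/*
		 * With a per-CPU exception base the refill handler must
		 * be regenerated for each CPU; otherwise it is written
		 * only once, together with the other handlers.
		 */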
		if (cpu_has_local_ebase)
			build_r3000_tlb_refill_handler();
		if (!run_once) {
			if (!cpu_has_local_ebase)
				build_r3000_tlb_refill_handler();
			build_setup_pgd();
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			flush_tlb_handlers();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
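		/*
		 * Cores implementing LDPTE (e.g. newer Loongson-3) use
		 * the page walker registers for their software refill
		 * handler; set them up first.
		 */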
		if (cpu_has_ldpte)
			setup_pw();

		if (!run_once) {
			scratch_reg = allocate_kscratch();
			build_setup_pgd();
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			if (cpu_has_ldpte)
				build_loongson3_tlb_refill_handler();
			else if (!cpu_has_local_ebase)
				build_r4000_tlb_refill_handler();
			flush_tlb_handlers();
			run_once++;
		}
		if (cpu_has_local_ebase)
			build_r4000_tlb_refill_handler();
		if (cpu_has_xpa)
			config_xpa_params();
		if (cpu_has_htw)
			config_htw_params();
	}
}