/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/uasm.h>

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);


static inline int r45k_bvahwbug(void)
{
        /* XXX: We should probe for the presence of this bug, but we don't. */
        return 0;
}

static inline int r4k_250MHZhwbug(void)
{
        /* XXX: We should probe for the presence of this bug, but we don't. */
        return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
        return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
        return R10000_LLSC_WAR;
}

/*
 * Found by experiment: At least some revisions of the 4kc throw a
 * machine check exception under some circumstances, triggered by
 * invalid values in the index register.  Delaying the tlbp instruction
 * until after the next branch, plus adding an additional nop in front
 * of tlbwi/tlbwr, avoids the invalid index register values.  Nobody
 * knows why; it's not an issue caused by the core RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
        return (current_cpu_data.processor_id & 0xffff00) ==
               (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
        label_second_part = 1,
        label_leave,
        label_vmalloc,
        label_vmalloc_done,
        label_tlbw_hazard,
        label_split,
        label_tlbl_goaround1,
        label_tlbl_goaround2,
        label_nopage_tlbl,
        label_nopage_tlbs,
        label_nopage_tlbm,
        label_smp_pgtable_change,
        label_r3000_write_probe_fail,
        label_large_segbits_fault,
#ifdef CONFIG_HUGETLB_PAGE
        label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif

/*
 * For debug purposes.
 */
static inline void dump_handler(const u32 *handler, int count)
{
        int i;

        pr_debug("\t.set push\n");
        pr_debug("\t.set noreorder\n");

        for (i = 0; i < count; i++)
                pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

        pr_debug("\t.set pop\n");
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0              26
#define K1              27

/* Some CP0 registers */
#define C0_INDEX        0, 0
#define C0_ENTRYLO0     2, 0
#define C0_TCBIND       2, 2
#define C0_ENTRYLO1     3, 0
#define C0_CONTEXT      4, 0
#define C0_PAGEMASK     5, 0
#define C0_BADVADDR     8, 0
#define C0_ENTRYHI      10, 0
#define C0_EPC          14, 0
#define C0_XCONTEXT     20, 0

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif

/*
 * The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

#ifdef CONFIG_64BIT
static int check_for_high_segbits __cpuinitdata;
#endif

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies 64 bit and lack of pgd_current,
 * so we cannot build the R3000 handlers under these circumstances.
 */

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
        long pgdc = (long)pgd_current;
        u32 *p;

        memset(tlb_handler, 0, sizeof(tlb_handler));
        p = tlb_handler;

        uasm_i_mfc0(&p, K0, C0_BADVADDR);
        uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
        uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
        uasm_i_srl(&p, K0, K0, 22); /* load delay */
        uasm_i_sll(&p, K0, K0, 2);
        uasm_i_addu(&p, K1, K1, K0);
        uasm_i_mfc0(&p, K0, C0_CONTEXT);
        uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
        uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
        uasm_i_addu(&p, K1, K1, K0);
        uasm_i_lw(&p, K0, 0, K1);
        uasm_i_nop(&p); /* load delay */
        uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
        uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
        uasm_i_tlbwr(&p); /* cp0 delay */
        uasm_i_jr(&p, K1);
        uasm_i_rfe(&p); /* branch delay */

        if (p > tlb_handler + 32)
                panic("TLB refill handler space exceeded");

        pr_debug("Wrote TLB refill handler (%u instructions).\n",
                 (unsigned int)(p - tlb_handler));

        memcpy((void *)ebase, tlb_handler, 0x80);

        dump_handler((u32 *)ebase, 32);
}
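
/*
 * A note on the index arithmetic in the sequence above, assuming the
 * R3000's fixed 4K pages: srl 22 turns BadVAddr into the pgd index
 * (each pgd entry maps 4MB), sll 2 scales it to a byte offset, and
 * "andi 0xffc" masks out of c0_context the low ten bits of the pte
 * index, pre-scaled by 4, which the hardware deposits there as
 * BadVPN << 2.
 */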
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions space each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *      stalling_instruction
 *      TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it a NOP or some other instruction guaranteed not to stall.
 *
 * Errata 2 will not be fixed.  This errata is also present on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
        switch (current_cpu_type()) {
        /* Found by experiment: R4600 v2.0/R4700 needs this, too. */
        case CPU_R4600:
        case CPU_R4700:
        case CPU_R5000:
        case CPU_R5000A:
        case CPU_NEVADA:
                uasm_i_nop(p);
                uasm_i_tlbp(p);
                break;

        default:
                uasm_i_tlbp(p);
                break;
        }
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
                                            struct uasm_reloc **r,
                                            enum tlb_write_entry wmode)
{
        void(*tlbw)(u32 **) = NULL;

        switch (wmode) {
        case tlb_random: tlbw = uasm_i_tlbwr; break;
        case tlb_indexed: tlbw = uasm_i_tlbwi; break;
        }

        if (cpu_has_mips_r2) {
                if (cpu_has_mips_r2_exec_hazard)
                        uasm_i_ehb(p);
                tlbw(p);
                return;
        }

        switch (current_cpu_type()) {
        case CPU_R4000PC:
        case CPU_R4000SC:
        case CPU_R4000MC:
        case CPU_R4400PC:
        case CPU_R4400SC:
        case CPU_R4400MC:
                /*
                 * This branch uses up a mtc0 hazard nop slot and saves
                 * two nops after the tlbw instruction.
                 */
                uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
                tlbw(p);
                uasm_l_tlbw_hazard(l, *p);
                uasm_i_nop(p);
                break;

        case CPU_R4600:
        case CPU_R4700:
        case CPU_R5000:
        case CPU_R5000A:
                uasm_i_nop(p);
                tlbw(p);
                uasm_i_nop(p);
                break;

        case CPU_R4300:
        case CPU_5KC:
        case CPU_TX49XX:
        case CPU_PR4450:
                uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_R10000:
        case CPU_R12000:
        case CPU_R14000:
        case CPU_4KC:
        case CPU_4KEC:
        case CPU_SB1:
        case CPU_SB1A:
        case CPU_4KSC:
        case CPU_20KC:
        case CPU_25KF:
        case CPU_BCM3302:
        case CPU_BCM4710:
        case CPU_LOONGSON2:
        case CPU_BCM6338:
        case CPU_BCM6345:
        case CPU_BCM6348:
        case CPU_BCM6358:
        case CPU_R5500:
                if (m4kc_tlbp_war())
                        uasm_i_nop(p);
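                /* fall through */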
        case CPU_ALCHEMY:
                tlbw(p);
                break;

        case CPU_NEVADA:
                uasm_i_nop(p); /* QED specifies 2 nops hazard */
                /*
                 * This branch uses up a mtc0 hazard nop slot and saves
                 * a nop after the tlbw instruction.
                 */
                uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
                tlbw(p);
                uasm_l_tlbw_hazard(l, *p);
                break;

        case CPU_RM7000:
                uasm_i_nop(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                break;

        case CPU_RM9000:
                /*
                 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
                 * use of the JTLB for instructions should not occur for 4
                 * cpu cycles and use for data translations should not occur
                 * for 3 cpu cycles.
                 */
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                tlbw(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                uasm_i_ssnop(p);
                break;

        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4181:
        case CPU_VR4181A:
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                uasm_i_nop(p);
                uasm_i_nop(p);
                break;

        case CPU_VR4131:
        case CPU_VR4133:
        case CPU_R5432:
                uasm_i_nop(p);
                uasm_i_nop(p);
                tlbw(p);
                break;

        default:
                panic("No TLB refill handler yet (CPU type: %d)",
                      current_cpu_data.cputype);
                break;
        }
}

static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
                                                                  unsigned int reg)
{
        if (kernel_uses_smartmips_rixi) {
                UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
                UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
        } else {
#ifdef CONFIG_64BIT_PHYS_ADDR
                uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
                UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
        }
}
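
/*
 * A sketch of what the RIXI path above achieves, assuming the pte
 * layout of this era, where the software protection bits sit just
 * below _PAGE_GLOBAL: the SRL discards everything below
 * _PAGE_NO_EXEC, then the ROTR drops _PAGE_GLOBAL into the EntryLo
 * G bit (bit 0) while wrapping the no-exec/no-read bits around into
 * the top of the register, where the hardware interprets them as
 * XI/RI.
 */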

#ifdef CONFIG_HUGETLB_PAGE

static __cpuinit void build_restore_pagemask(u32 **p,
                                             struct uasm_reloc **r,
                                             unsigned int tmp,
                                             enum label_id lid)
{
        /* Reset default page size */
        if (PM_DEFAULT_MASK >> 16) {
                uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
                uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
                uasm_il_b(p, r, lid);
                uasm_i_mtc0(p, tmp, C0_PAGEMASK);
        } else if (PM_DEFAULT_MASK) {
                uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
                uasm_il_b(p, r, lid);
                uasm_i_mtc0(p, tmp, C0_PAGEMASK);
        } else {
                uasm_il_b(p, r, lid);
                uasm_i_mtc0(p, 0, C0_PAGEMASK);
        }
}
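
/*
 * Note on the three arms above: PM_DEFAULT_MASK needs lui/ori when it
 * has bits above 15, a single ori when it is small but nonzero, and
 * register $0 when it is zero (the 4K page case).  In each arm the
 * mtc0 sits in the delay slot of the branch back to LID, so restoring
 * the pagemask costs no extra instruction slot.
 */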

static __cpuinit void build_huge_tlb_write_entry(u32 **p,
                                                 struct uasm_label **l,
                                                 struct uasm_reloc **r,
                                                 unsigned int tmp,
                                                 enum tlb_write_entry wmode)
{
        /* Set huge page tlb entry size */
        uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
        uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
        uasm_i_mtc0(p, tmp, C0_PAGEMASK);

        build_tlb_write_entry(p, l, r, wmode);

        build_restore_pagemask(p, r, tmp, label_leave);
}

/*
 * Check if a huge PTE is present; if so, jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
                  unsigned int pmd, int lid)
{
        UASM_i_LW(p, tmp, 0, pmd);
        uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
        uasm_il_bnez(p, r, tmp, lid);
}

static __cpuinit void build_huge_update_entries(u32 **p,
                                                unsigned int pte,
                                                unsigned int tmp)
{
        int small_sequence;

        /*
         * A huge PTE describes an area the size of the
         * configured huge page size. This is twice the
         * size of the large TLB entry we intend to use.
         * A TLB entry half the size of the configured
         * huge page size is configured into entrylo0
         * and entrylo1 to cover the contiguous huge PTE
         * address space.
         */
        small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

        /* We can clobber tmp.  It isn't used after this. */
        if (!small_sequence)
                uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

        build_convert_pte_to_entrylo(p, pte);
        UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
        /* convert to entrylo1 */
        if (small_sequence)
                UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
        else
                UASM_i_ADDU(p, pte, pte, tmp);

        UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}
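
/*
 * Worked example for the small_sequence test above, assuming 4K base
 * pages (PAGE_SHIFT == 12) and 2MB huge pages: the pte increment
 * between entrylo0 and entrylo1 is half the huge page expressed in
 * EntryLo PFN units, (HPAGE_SIZE >> 1) >> (12 - 6) == HPAGE_SIZE >> 7
 * == 0x4000, which fits in the 16-bit immediate of a single ADDIU.
 */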

static __cpuinit void build_huge_handler_tail(u32 **p,
                                              struct uasm_reloc **r,
                                              struct uasm_label **l,
                                              unsigned int pte,
                                              unsigned int ptr)
{
#ifdef CONFIG_SMP
        UASM_i_SC(p, pte, 0, ptr);
        uasm_il_beqz(p, r, pte, label_tlb_huge_update);
        UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
        UASM_i_SW(p, pte, 0, ptr);
#endif
        build_huge_update_entries(p, pte, ptr);
        build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
        long pgdc = (long)pgd_current;
#endif
        /*
         * The vmalloc handling is not in the hotpath.
         */
        uasm_i_dmfc0(p, tmp, C0_BADVADDR);

        if (check_for_high_segbits) {
                /*
                 * The kernel currently implicitly assumes that the
                 * MIPS SEGBITS parameter for the processor is
                 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
                 * allocate virtual addresses outside the maximum
                 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
                 * that doesn't prevent user code from accessing the
                 * higher xuseg addresses.  Here, we make sure that
                 * everything but the lower xuseg addresses goes down
                 * the module_alloc/vmalloc path.
                 */
                uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
                uasm_il_bnez(p, r, ptr, label_vmalloc);
        } else {
                uasm_il_bltz(p, r, tmp, label_vmalloc);
        }
        /* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
        /*
         * &pgd << 11 stored in CONTEXT [23..63].
         */
        UASM_i_MFC0(p, ptr, C0_CONTEXT);
        uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */
        uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0 1 0 1 << 6  xkphys cached */
        uasm_i_drotr(p, ptr, ptr, 11);
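        /*
         * Net effect of the three instructions above, assuming the
         * Cavium-style pgd-in-CONTEXT scheme: dins clears the
         * hardware-written BadVPN2 bits, ori plants bits 6, 8 and 10,
         * and the rotate by 11 brings the stored pointer back down
         * while wrapping those three bits up to bits 59, 61 and 63,
         * i.e. into a cached xkphys address, so ptr points at the pgd
         * without any memory load.
         */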
#elif defined(CONFIG_SMP)
# ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
        uasm_i_mfc0(p, ptr, C0_TCBIND);
        uasm_i_dsrl_safe(p, ptr, ptr, 19);
# else
        /*
         * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
         * stored in CONTEXT.
         */
        uasm_i_dmfc0(p, ptr, C0_CONTEXT);
        uasm_i_dsrl_safe(p, ptr, ptr, 23);
# endif
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_daddu(p, ptr, ptr, tmp);
        uasm_i_dmfc0(p, tmp, C0_BADVADDR);
        uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
        UASM_i_LA_mostly(p, ptr, pgdc);
        uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

        uasm_l_vmalloc_done(l, *p);

        /* get pgd offset in bytes */
        uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1)<<3);
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PMD_FOLDED
        uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
        uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT-3); /* get pmd offset in bytes */
        uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1)<<3);
        uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}

enum vmalloc64_mode {not_refill, refill};
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
                        unsigned int bvaddr, unsigned int ptr,
                        enum vmalloc64_mode mode)
{
        long swpd = (long)swapper_pg_dir;
        int single_insn_swpd;
        int did_vmalloc_branch = 0;

        single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

        uasm_l_vmalloc(l, *p);

        if (mode == refill && check_for_high_segbits) {
                if (single_insn_swpd) {
                        uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
                        uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
                        did_vmalloc_branch = 1;
                        /* fall through */
                } else {
                        uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
                }
        }
        if (!did_vmalloc_branch) {
                if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
                        uasm_il_b(p, r, label_vmalloc_done);
                        uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
                } else {
                        UASM_i_LA_mostly(p, ptr, swpd);
                        uasm_il_b(p, r, label_vmalloc_done);
                        if (uasm_in_compat_space_p(swpd))
                                uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
                        else
                                uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
                }
        }
        if (mode == refill && check_for_high_segbits) {
                uasm_l_large_segbits_fault(l, *p);
                /*
                 * We get here if we are an xsseg address, or if we are
                 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS) boundary.
                 *
                 * Ignoring xsseg (assume disabled, so it would generate
                 * address errors), the only remaining possibility
                 * is the upper xuseg addresses.  On processors with
                 * TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS, these
                 * addresses would have taken an address error. We try
                 * to mimic that here by taking a load/istream page
                 * fault.
                 */
                UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
                uasm_i_jr(p, ptr);
                uasm_i_nop(p);
        }
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
        long pgdc = (long)pgd_current;

        /* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
        /*
         * SMTC uses TCBind value as "CPU" index
         */
        uasm_i_mfc0(p, ptr, C0_TCBIND);
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_srl(p, ptr, ptr, 19);
#else
        /*
         * smp_processor_id() << 3 is stored in CONTEXT.
         */
        uasm_i_mfc0(p, ptr, C0_CONTEXT);
        UASM_i_LA_mostly(p, tmp, pgdc);
        uasm_i_srl(p, ptr, ptr, 23);
#endif
        uasm_i_addu(p, ptr, tmp, ptr);
#else
        UASM_i_LA_mostly(p, ptr, pgdc);
#endif
        uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
        uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
        uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
        uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
        uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
        unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
        unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

        switch (current_cpu_type()) {
        case CPU_VR41XX:
        case CPU_VR4111:
        case CPU_VR4121:
        case CPU_VR4122:
        case CPU_VR4131:
        case CPU_VR4181:
        case CPU_VR4181A:
        case CPU_VR4133:
                shift += 2;
                break;

        default:
                break;
        }

        if (shift)
                UASM_i_SRL(p, ctx, ctx, shift);
        uasm_i_andi(p, ctx, ctx, mask);
}
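
/*
 * Worked example, assuming a 32-bit kernel with 4K pages: PTE_T_LOG2
 * is 2 and PAGE_SHIFT is 12, so shift == 4 - 3 + 0 == 1 and mask ==
 * (1024 / 2 - 1) << 3 == 0xff8.  The hardware maintains BadVPN2 << 4
 * in the Context register; shifting right by one and masking turns
 * that into the byte offset of the even/odd pte pair within the page
 * table page.
 */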

static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
        /*
         * Bug workaround for the Nevada. It seems as if under certain
         * circumstances the move from cp0_context might produce a
         * bogus result when the mfc0 instruction and its consumer are
         * in different cachelines, or when a load instruction
         * (probably any memory reference) sits between them.
         */
        switch (current_cpu_type()) {
        case CPU_NEVADA:
                UASM_i_LW(p, ptr, 0, ptr);
                GET_CONTEXT(p, tmp); /* get context reg */
                break;

        default:
                GET_CONTEXT(p, tmp); /* get context reg */
                UASM_i_LW(p, ptr, 0, ptr);
                break;
        }

        build_adjust_context(p, tmp);
        UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
                                           unsigned int ptep)
{
        /*
         * 64bit address support (36bit on a 32bit CPU) in a 32bit
         * kernel is a special case. Only a few CPUs use it.
         */
#ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits) {
                uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
                uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
                if (kernel_uses_smartmips_rixi) {
                        UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
                        UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
                        UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
                        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
                        UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
                } else {
                        uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
                        UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
                        uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
                }
                UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
        } else {
                int pte_off_even = sizeof(pte_t) / 2;
                int pte_off_odd = pte_off_even + sizeof(pte_t);

                /* The pte entries are pre-shifted */
                uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
                UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
                uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
                UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
        }
#else
        UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
        UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
        if (r45k_bvahwbug())
                build_tlb_probe_entry(p);
        if (kernel_uses_smartmips_rixi) {
                UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
                UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
                UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
                if (r4k_250MHZhwbug())
                        UASM_i_MTC0(p, 0, C0_ENTRYLO0);
                UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
                UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
        } else {
                UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
                if (r4k_250MHZhwbug())
                        UASM_i_MTC0(p, 0, C0_ENTRYLO0);
                UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
                UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
                if (r45k_bvahwbug())
                        uasm_i_mfc0(p, tmp, C0_INDEX);
        }
        if (r4k_250MHZhwbug())
                UASM_i_MTC0(p, 0, C0_ENTRYLO1);
        UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32

static void __cpuinit build_r4000_tlb_refill_handler(void)
{
        u32 *p = tlb_handler;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;
        u32 *f;
        unsigned int final_len;

        memset(tlb_handler, 0, sizeof(tlb_handler));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));
        memset(final_handler, 0, sizeof(final_handler));

        /*
         * create the plain linear handler
         */
        if (bcm1250_m3_war()) {
                unsigned int segbits = 44;

                uasm_i_dmfc0(&p, K0, C0_BADVADDR);
                uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
                uasm_i_xor(&p, K0, K0, K1);
                uasm_i_dsrl_safe(&p, K1, K0, 62);
                uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
                uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
                uasm_i_or(&p, K0, K0, K1);
                uasm_il_bnez(&p, &r, K0, label_leave);
                /* No need for uasm_i_nop */
        }
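        /*
         * The sequence above is the BCM1250 M3 workaround: it XORs
         * BadVAddr with EntryHi and tests the region bits (63:62)
         * plus the VPN2 bits below segbits; if they disagree, the
         * refill exception was spurious and the bnez to label_leave
         * makes the handler eret without touching the TLB.
         */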

#ifdef CONFIG_64BIT
        build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
        build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_HUGETLB_PAGE
        build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

        build_get_ptep(&p, K0, K1);
        build_update_entries(&p, K0, K1);
        build_tlb_write_entry(&p, &l, &r, tlb_random);
        uasm_l_leave(&l, p);
        uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_HUGETLB_PAGE
        uasm_l_tlb_huge_update(&l, p);
        UASM_i_LW(&p, K0, 0, K1);
        build_huge_update_entries(&p, K0, K1);
        build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
#endif

#ifdef CONFIG_64BIT
        build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
#endif

        /*
         * Overflow check: For the 64bit handler, we need at least one
         * free instruction slot for the wrap-around branch. In the
         * worst case, if the intended insertion point is a delay slot,
         * we need three, with the second nop'ed and the third being
         * unused.
         */
        /* Loongson2 ebase is different from r4k's, so we have more space. */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
        if ((p - tlb_handler) > 64)
                panic("TLB refill handler space exceeded");
#else
        if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
            || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
                && uasm_insn_has_bdelay(relocs,
                                        tlb_handler + MIPS64_REFILL_INSNS - 3)))
                panic("TLB refill handler space exceeded");
#endif

        /*
         * Now fold the handler in the TLB refill handler space.
         */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
        f = final_handler;
        /* Simplest case, just copy the handler. */
        uasm_copy_handler(relocs, labels, tlb_handler, p, f);
        final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
        f = final_handler + MIPS64_REFILL_INSNS;
        if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
                /* Just copy the handler. */
                uasm_copy_handler(relocs, labels, tlb_handler, p, f);
                final_len = p - tlb_handler;
        } else {
#if defined(CONFIG_HUGETLB_PAGE)
                const enum label_id ls = label_tlb_huge_update;
#else
                const enum label_id ls = label_vmalloc;
#endif
                u32 *split;
                int ov = 0;
                int i;

                for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
                        ;
                BUG_ON(i == ARRAY_SIZE(labels));
                split = labels[i].addr;

                /*
                 * See if we have overflown one way or the other.
                 */
                if (split > tlb_handler + MIPS64_REFILL_INSNS ||
                    split < p - MIPS64_REFILL_INSNS)
                        ov = 1;

                if (ov) {
                        /*
                         * Split two instructions before the end.  One
                         * for the branch and one for the instruction
                         * in the delay slot.
                         */
                        split = tlb_handler + MIPS64_REFILL_INSNS - 2;

                        /*
                         * If the branch would fall in a delay slot,
                         * we must back up an additional instruction
                         * so that it is no longer in a delay slot.
                         */
                        if (uasm_insn_has_bdelay(relocs, split - 1))
                                split--;
                }
                /* Copy first part of the handler. */
                uasm_copy_handler(relocs, labels, tlb_handler, split, f);
                f += split - tlb_handler;

                if (ov) {
                        /* Insert branch. */
                        uasm_l_split(&l, final_handler);
                        uasm_il_b(&f, &r, label_split);
                        if (uasm_insn_has_bdelay(relocs, split))
                                uasm_i_nop(&f);
                        else {
                                uasm_copy_handler(relocs, labels,
                                                  split, split + 1, f);
                                uasm_move_labels(labels, f, f + 1, -1);
                                f++;
                                split++;
                        }
                }

                /* Copy the rest of the handler. */
                uasm_copy_handler(relocs, labels, split, p, final_handler);
                final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
                            (p - split);
        }
#endif /* CONFIG_64BIT */

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB refill handler (%u instructions).\n",
                 final_len);

        memcpy((void *)ebase, final_handler, 0x100);

        dump_handler((u32 *)ebase, 64);
}

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

static void __cpuinit
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_lld(p, pte, 0, ptr);
        else
# endif
                UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_ld(p, pte, 0, ptr);
        else
# endif
                UASM_i_LW(p, pte, 0, ptr);
#endif
}

static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
        unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
        unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

        uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_scd(p, pte, 0, ptr);
        else
# endif
                UASM_i_SC(p, pte, 0, ptr);

        if (r10000_llsc_war())
                uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
        else
                uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
        if (!cpu_has_64bits) {
                /* no uasm_i_nop needed */
                uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_ori(p, pte, pte, hwmode);
                uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
                /* no uasm_i_nop needed */
                uasm_i_lw(p, pte, 0, ptr);
        } else
                uasm_i_nop(p);
# else
        uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
        if (cpu_has_64bits)
                uasm_i_sd(p, pte, 0, ptr);
        else
# endif
                UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
        if (!cpu_has_64bits) {
                uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_ori(p, pte, pte, hwmode);
                uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
                uasm_i_lw(p, pte, 0, ptr);
        }
# endif
#endif
}
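
/*
 * Note on the SMP path above: the emitted code is an open-coded LL/SC
 * update of the pte.  If the SC fails because another CPU changed the
 * page table underneath us, the beqz(l) sends the generated handler
 * back to label_smp_pgtable_change to retry the whole lookup.
 */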

/*
 * Check if PTE is present; if not, jump to LABEL.  PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
                  unsigned int pte, unsigned int ptr, enum label_id lid)
{
        if (kernel_uses_smartmips_rixi) {
                uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
                uasm_il_beqz(p, r, pte, lid);
        } else {
                uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
                uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
                uasm_il_bnez(p, r, pte, lid);
        }
        iPTE_LW(p, pte, ptr);
}
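
/*
 * The andi/xori pair above is a two-instruction test that both
 * _PAGE_PRESENT and _PAGE_READ are set: andi isolates the two bits
 * and xori flips them, so the result is zero only when both were
 * set, and the bnez takes the fault path otherwise.
 */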

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
                 unsigned int ptr)
{
        unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

        iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to; if not, branch to LABEL.
 * Regardless, restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
                   unsigned int pte, unsigned int ptr, enum label_id lid)
{
        uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
        uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
        uasm_il_bnez(p, r, pte, lid);
        iPTE_LW(p, pte, ptr);
}

/*
 * Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
                 unsigned int ptr)
{
        unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
                             | _PAGE_DIRTY);

        iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified; if not, branch to LABEL.
 * Regardless, restore PTE with value from PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
                     unsigned int pte, unsigned int ptr, enum label_id lid)
{
        uasm_i_andi(p, pte, pte, _PAGE_WRITE);
        uasm_il_beqz(p, r, pte, lid);
        iPTE_LW(p, pte, ptr);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
        uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
        uasm_i_tlbwi(p);
        uasm_i_jr(p, tmp);
        uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
                             struct uasm_reloc **r, unsigned int pte,
                             unsigned int tmp)
{
        uasm_i_mfc0(p, tmp, C0_INDEX);
        uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
        uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
        uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
        uasm_i_tlbwi(p); /* cp0 delay */
        uasm_i_jr(p, tmp);
        uasm_i_rfe(p); /* branch delay */
        uasm_l_r3000_write_probe_fail(l, *p);
        uasm_i_tlbwr(p); /* cp0 delay */
        uasm_i_jr(p, tmp);
        uasm_i_rfe(p); /* branch delay */
}

static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
                                   unsigned int ptr)
{
        long pgdc = (long)pgd_current;

        uasm_i_mfc0(p, pte, C0_BADVADDR);
        uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
        uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
        uasm_i_srl(p, pte, pte, 22); /* load delay */
        uasm_i_sll(p, pte, pte, 2);
        uasm_i_addu(p, ptr, ptr, pte);
        uasm_i_mfc0(p, pte, C0_CONTEXT);
        uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
        uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
        uasm_i_addu(p, ptr, ptr, pte);
        uasm_i_lw(p, pte, 0, ptr);
        uasm_i_tlbp(p); /* load delay */
}

static void __cpuinit build_r3000_tlb_load_handler(void)
{
        u32 *p = handle_tlbl;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbl, 0, sizeof(handle_tlbl));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
        uasm_i_nop(&p); /* load delay */
        build_make_valid(&p, &r, K0, K1);
        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

        uasm_l_nopage_tlbl(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbl) > FASTPATH_SIZE)
                panic("TLB load handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbl));

        dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r3000_tlb_store_handler(void)
{
        u32 *p = handle_tlbs;
        struct uasm_label *l = labels;
        struct uasm_reloc *r = relocs;

        memset(handle_tlbs, 0, sizeof(handle_tlbs));
        memset(labels, 0, sizeof(labels));
        memset(relocs, 0, sizeof(relocs));

        build_r3000_tlbchange_handler_head(&p, K0, K1);
        build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
        uasm_i_nop(&p); /* load delay */
        build_make_write(&p, &r, K0, K1);
        build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

        uasm_l_nopage_tlbs(&l, p);
        uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
        uasm_i_nop(&p);

        if ((p - handle_tlbs) > FASTPATH_SIZE)
                panic("TLB store handler fastpath space exceeded");

        uasm_resolve_relocs(relocs, labels);
        pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
                 (unsigned int)(p - handle_tlbs));

        dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}
1281
Ralf Baechle234fcd12008-03-08 09:56:28 +00001282static void __cpuinit build_r3000_tlb_modify_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001283{
1284 u32 *p = handle_tlbm;
Thiemo Seufere30ec452008-01-28 20:05:38 +00001285 struct uasm_label *l = labels;
1286 struct uasm_reloc *r = relocs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287
1288 memset(handle_tlbm, 0, sizeof(handle_tlbm));
1289 memset(labels, 0, sizeof(labels));
1290 memset(relocs, 0, sizeof(relocs));
1291
1292 build_r3000_tlbchange_handler_head(&p, K0, K1);
David Daneybd1437e2009-05-08 15:10:50 -07001293 build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001294 uasm_i_nop(&p); /* load delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001295 build_make_write(&p, &r, K0, K1);
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001296 build_r3000_pte_reload_tlbwi(&p, K0, K1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001297
Thiemo Seufere30ec452008-01-28 20:05:38 +00001298 uasm_l_nopage_tlbm(&l, p);
1299 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1300 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301
1302 if ((p - handle_tlbm) > FASTPATH_SIZE)
1303 panic("TLB modify handler fastpath space exceeded");
1304
Thiemo Seufere30ec452008-01-28 20:05:38 +00001305 uasm_resolve_relocs(relocs, labels);
1306 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1307 (unsigned int)(p - handle_tlbm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001308
Franck Bui-Huu92b1e6a2007-10-18 09:11:17 +02001309 dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001310}
David Daney826222842009-10-14 12:16:56 -07001311#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312
1313/*
1314 * R4000 style TLB load/store/modify handlers.
1315 */
Ralf Baechle234fcd12008-03-08 09:56:28 +00001316static void __cpuinit
Thiemo Seufere30ec452008-01-28 20:05:38 +00001317build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1318 struct uasm_reloc **r, unsigned int pte,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001319 unsigned int ptr)
1320{
Ralf Baechle875d43e2005-09-03 15:56:16 -07001321#ifdef CONFIG_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001322 build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
1323#else
1324 build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
1325#endif
1326
David Daneyfd062c82009-05-27 17:47:44 -07001327#ifdef CONFIG_HUGETLB_PAGE
1328 /*
1329 * For huge tlb entries, pmd doesn't contain an address but
1330 * instead contains the tlb pte. Check the PAGE_HUGE bit and
1331 * see if we need to jump to huge tlb processing.
1332 */
1333 build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
1334#endif
1335
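	/*
	 * ptr currently points at the pgd/pmd slot for the faulting
	 * address.  The lines below load the pte-table base from that
	 * slot, derive the byte offset of the faulting pte from BadVAddr
	 * (shift and mask come from the pgtable layout macros) and add
	 * the two together; the handler tail later aligns ptr back to
	 * the even pte of the pair so both EntryLo halves can be written.
	 */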
	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}

static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
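	/*
	 * ori followed by xori with the same constant clears the
	 * sizeof(pte_t) bit no matter which pte of the pair faulted,
	 * i.e. it aligns ptr down to the even pte so that
	 * build_update_entries() can refill both EntryLo0 and EntryLo1.
	 */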
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}

static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

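		/*
		 * Sketch of the workaround test, as I read the sequence
		 * below: XOR BadVAddr with EntryHi, discard the low 13
		 * bits and everything above the implemented segbits, keep
		 * the two top segment-select bits, and if any significant
		 * VPN bit differs just leave via label_leave, treating the
		 * exception as the spurious M3 case.
		 */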
		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
		uasm_i_beqz(&p, K0, 8);
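		/*
		 * The hard-coded branch offset of 8 skips exactly one
		 * instruction beyond the delay slot: the ENTRYLO0 read
		 * below sits in the delay slot and always happens, and
		 * only when ptr selects the odd pte does the ENTRYLO1
		 * read overwrite K0.
		 */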

		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
		/*
		 * If the entryLo (now in K0) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		uasm_i_andi(&p, K0, K0, 2);
		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);

		uasm_l_tlbl_goaround1(&l, p);
		/* Reload the PTE value */
		iPTE_LW(&p, K0, K1);
	}
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
		uasm_i_beqz(&p, K0, 8);

		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
		/*
		 * If the entryLo (now in K0) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		uasm_i_andi(&p, K0, K0, 2);
		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
		/* Reload the PTE value */
		iPTE_LW(&p, K0, K1);

		/*
		 * We clobbered C0_PAGEMASK, restore it.  On the other branch
		 * it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);

		uasm_l_tlbl_goaround2(&l, p);
	}
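	/*
	 * Mark the huge pte accessed and valid before the huge-page tail
	 * writes it out; the store and modify handlers below additionally
	 * set the modified and dirty bits.
	 */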
	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it.  The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

#ifdef CONFIG_64BIT
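	/*
	 * Loosely: PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3 is the number
	 * of virtual-address bits a single PGD can map with 8-byte
	 * entries.  If the CPU implements more vmbits than that, the
	 * generated handlers must special-case addresses beyond the
	 * mapped range, hence the check below.
	 */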
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}

void __cpuinit flush_tlb_handlers(void)
{
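	/*
	 * The handlers above were written into their arrays through the
	 * data cache; flush the ranges so instruction fetch sees the
	 * synthesized code before any CPU takes a TLB exception through it.
	 */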
	local_flush_icache_range((unsigned long)handle_tlbl,
				 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
				 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
				 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}