/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License.  See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * Synthesize TLB refill handlers at runtime.
 *
 * Copyright (C) 2004, 2005, 2006, 2008  Thiemo Seufer
 * Copyright (C) 2005, 2007, 2008, 2009  Maciej W. Rozycki
 * Copyright (C) 2006  Ralf Baechle (ralf@linux-mips.org)
 * Copyright (C) 2008, 2009 Cavium Networks, Inc.
 *
 * ... and the days got worse and worse and now you see
 * I've gone completely out of my mind.
 *
 * They're coming to take me away haha
 * they're coming to take me away hoho hihi haha
 * to the funny farm where code is beautiful all the time ...
 *
 * (Condolences to Napoleon XIV)
 */

#include <linux/bug.h>
#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/smp.h>
#include <linux/string.h>
#include <linux/init.h>

#include <asm/mmu_context.h>
#include <asm/war.h>
#include <asm/uasm.h>

/*
 * TLB load/store/modify handlers.
 *
 * Only the fastpath gets synthesized at runtime, the slowpath for
 * do_page_fault remains normal asm.
 */
extern void tlb_do_page_fault_0(void);
extern void tlb_do_page_fault_1(void);


static inline int r45k_bvahwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int r4k_250MHZhwbug(void)
{
	/* XXX: We should probe for the presence of this bug, but we don't. */
	return 0;
}

static inline int __maybe_unused bcm1250_m3_war(void)
{
	return BCM1250_M3_WAR;
}

static inline int __maybe_unused r10000_llsc_war(void)
{
	return R10000_LLSC_WAR;
}

/*
 * Found by experiment: under some circumstances at least some
 * revisions of the 4Kc throw a machine check exception, triggered by
 * invalid values in the index register.  Delaying the tlbp
 * instruction until after the next branch, plus adding an additional
 * nop in front of tlbwi/tlbwr, avoids the invalid index register
 * values.  Nobody knows why; it's not an issue caused by the core
 * RTL.
 */
static int __cpuinit m4kc_tlbp_war(void)
{
	return (current_cpu_data.processor_id & 0xffff00) ==
	       (PRID_COMP_MIPS | PRID_IMP_4KC);
}

/* Handle labels (which must be positive integers). */
enum label_id {
	label_second_part = 1,
	label_leave,
	label_vmalloc,
	label_vmalloc_done,
	label_tlbw_hazard,
	label_split,
	label_tlbl_goaround1,
	label_tlbl_goaround2,
	label_nopage_tlbl,
	label_nopage_tlbs,
	label_nopage_tlbm,
	label_smp_pgtable_change,
	label_r3000_write_probe_fail,
	label_large_segbits_fault,
#ifdef CONFIG_HUGETLB_PAGE
	label_tlb_huge_update,
#endif
};

UASM_L_LA(_second_part)
UASM_L_LA(_leave)
UASM_L_LA(_vmalloc)
UASM_L_LA(_vmalloc_done)
UASM_L_LA(_tlbw_hazard)
UASM_L_LA(_split)
UASM_L_LA(_tlbl_goaround1)
UASM_L_LA(_tlbl_goaround2)
UASM_L_LA(_nopage_tlbl)
UASM_L_LA(_nopage_tlbs)
UASM_L_LA(_nopage_tlbm)
UASM_L_LA(_smp_pgtable_change)
UASM_L_LA(_r3000_write_probe_fail)
UASM_L_LA(_large_segbits_fault)
#ifdef CONFIG_HUGETLB_PAGE
UASM_L_LA(_tlb_huge_update)
#endif

/*
 * For debug purposes: dump the synthesized handler as raw .word
 * directives that can be fed to a disassembler.
 */
static inline void dump_handler(const u32 *handler, int count)
{
	int i;

	pr_debug("\t.set push\n");
	pr_debug("\t.set noreorder\n");

	for (i = 0; i < count; i++)
		pr_debug("\t%p\t.word 0x%08x\n", &handler[i], handler[i]);

	pr_debug("\t.set pop\n");
}

/* The only general purpose registers allowed in TLB handlers. */
#define K0		26
#define K1		27
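
/*
 * K0 and K1 are $k0/$k1 ($26/$27), the two GPRs the MIPS calling
 * convention reserves for kernel use; an exception handler may
 * clobber them without saving the interrupted code's state.
 */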

/* Some CP0 registers */
#define C0_INDEX	0, 0
#define C0_ENTRYLO0	2, 0
#define C0_TCBIND	2, 2
#define C0_ENTRYLO1	3, 0
#define C0_CONTEXT	4, 0
#define C0_PAGEMASK	5, 0
#define C0_BADVADDR	8, 0
#define C0_ENTRYHI	10, 0
#define C0_EPC		14, 0
#define C0_XCONTEXT	20, 0
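
/*
 * Each pair is (register number, select) in the form the uasm
 * m[ft]c0 helpers expect, e.g. C0_TCBIND is CP0 register 2, select 2.
 */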

#ifdef CONFIG_64BIT
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_XCONTEXT)
#else
# define GET_CONTEXT(buf, reg) UASM_i_MFC0(buf, reg, C0_CONTEXT)
#endif
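
/*
 * 64-bit kernels take the XTLB refill path and use c0_xcontext, the
 * 64-bit counterpart of c0_context; both hold the faulting BadVPN2,
 * so the handlers can index the page table without rereading
 * c0_badvaddr.
 */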

/* The worst case length of the handler is around 18 instructions for
 * R3000-style TLBs and up to 63 instructions for R4000-style TLBs.
 * Maximum space available is 32 instructions for R3000 and 64
 * instructions for R4000.
 *
 * We deliberately chose a buffer size of 128, so we won't scribble
 * over anything important on overflow before we panic.
 */
static u32 tlb_handler[128] __cpuinitdata;

/* simply assume worst case size for labels and relocs */
static struct uasm_label labels[128] __cpuinitdata;
static struct uasm_reloc relocs[128] __cpuinitdata;

#ifdef CONFIG_64BIT
static int check_for_high_segbits __cpuinitdata;
#endif

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * CONFIG_MIPS_PGD_C0_CONTEXT implies a 64-bit kernel and the absence
 * of pgd_current, so we cannot build the R3000 handlers under these
 * circumstances.
 */

/*
 * The R3000 TLB handler is simple.
 */
static void __cpuinit build_r3000_tlb_refill_handler(void)
{
	long pgdc = (long)pgd_current;
	u32 *p;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	p = tlb_handler;

	uasm_i_mfc0(&p, K0, C0_BADVADDR);
	uasm_i_lui(&p, K1, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(&p, K1, uasm_rel_lo(pgdc), K1);
	uasm_i_srl(&p, K0, K0, 22); /* load delay */
	uasm_i_sll(&p, K0, K0, 2);
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_mfc0(&p, K0, C0_CONTEXT);
	uasm_i_lw(&p, K1, 0, K1); /* cp0 delay */
	uasm_i_andi(&p, K0, K0, 0xffc); /* load delay */
	uasm_i_addu(&p, K1, K1, K0);
	uasm_i_lw(&p, K0, 0, K1);
	uasm_i_nop(&p); /* load delay */
	uasm_i_mtc0(&p, K0, C0_ENTRYLO0);
	uasm_i_mfc0(&p, K1, C0_EPC); /* cp0 delay */
	uasm_i_tlbwr(&p); /* cp0 delay */
	uasm_i_jr(&p, K1);
	uasm_i_rfe(&p); /* branch delay */

	if (p > tlb_handler + 32)
		panic("TLB refill handler space exceeded");

	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 (unsigned int)(p - tlb_handler));

	memcpy((void *)ebase, tlb_handler, 0x80);

	dump_handler((u32 *)ebase, 32);
}
#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */

/*
 * The R4000 TLB handler is much more complicated. We have two
 * consecutive handler areas with 32 instructions of space each.
 * Since they aren't used at the same time, we can overflow into the
 * other one. To keep things simple, we first assume linear space,
 * then we relocate it to the final handler layout as needed.
 */
static u32 final_handler[64] __cpuinitdata;

/*
 * Hazards
 *
 * From the IDT errata for the QED RM5230 (Nevada), processor revision 1.0:
 * 2. A timing hazard exists for the TLBP instruction.
 *
 *	stalling_instruction
 *	TLBP
 *
 * The JTLB is being read for the TLBP throughout the stall generated by the
 * previous instruction. This is not really correct as the stalling instruction
 * can modify the address used to access the JTLB.  The failure symptom is that
 * the TLBP instruction will use an address created for the stalling instruction
 * and not the address held in C0_ENHI and thus report the wrong results.
 *
 * The software work-around is to not allow the instruction preceding the TLBP
 * to stall - make it an NOP or some other instruction guaranteed not to stall.
 *
 * Erratum 2 will not be fixed.  This erratum is also present on the R5000.
 *
 * As if we MIPS hackers wouldn't know how to nop pipelines happy ...
 */
static void __cpuinit __maybe_unused build_tlb_probe_entry(u32 **p)
{
	switch (current_cpu_type()) {
	/* Found by experiment: R4600 v2.0/R4700 needs this, too.  */
	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
	case CPU_NEVADA:
		uasm_i_nop(p);
		uasm_i_tlbp(p);
		break;

	default:
		uasm_i_tlbp(p);
		break;
	}
}

/*
 * Write random or indexed TLB entry, and care about the hazards from
 * the preceding mtc0 and for the following eret.
 */
enum tlb_write_entry { tlb_random, tlb_indexed };

static void __cpuinit build_tlb_write_entry(u32 **p, struct uasm_label **l,
					    struct uasm_reloc **r,
					    enum tlb_write_entry wmode)
{
	void (*tlbw)(u32 **) = NULL;

	switch (wmode) {
	case tlb_random: tlbw = uasm_i_tlbwr; break;
	case tlb_indexed: tlbw = uasm_i_tlbwi; break;
	}

	if (cpu_has_mips_r2) {
		if (cpu_has_mips_r2_exec_hazard)
			uasm_i_ehb(p);
		tlbw(p);
		return;
	}

	switch (current_cpu_type()) {
	case CPU_R4000PC:
	case CPU_R4000SC:
	case CPU_R4000MC:
	case CPU_R4400PC:
	case CPU_R4400SC:
	case CPU_R4400MC:
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * two nops after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		uasm_i_nop(p);
		break;

	case CPU_R4600:
	case CPU_R4700:
	case CPU_R5000:
	case CPU_R5000A:
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		break;

	case CPU_R4300:
	case CPU_5KC:
	case CPU_TX49XX:
	case CPU_PR4450:
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_R10000:
	case CPU_R12000:
	case CPU_R14000:
	case CPU_4KC:
	case CPU_4KEC:
	case CPU_SB1:
	case CPU_SB1A:
	case CPU_4KSC:
	case CPU_20KC:
	case CPU_25KF:
	case CPU_BMIPS32:
	case CPU_BMIPS3300:
	case CPU_BMIPS4350:
	case CPU_BMIPS4380:
	case CPU_BMIPS5000:
	case CPU_LOONGSON2:
	case CPU_R5500:
		if (m4kc_tlbp_war())
			uasm_i_nop(p);
	case CPU_ALCHEMY:
		tlbw(p);
		break;

	case CPU_NEVADA:
		uasm_i_nop(p); /* QED specifies 2 nops hazard */
		/*
		 * This branch uses up a mtc0 hazard nop slot and saves
		 * a nop after the tlbw instruction.
		 */
		uasm_il_bgezl(p, r, 0, label_tlbw_hazard);
		tlbw(p);
		uasm_l_tlbw_hazard(l, *p);
		break;

	case CPU_RM7000:
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_RM9000:
		/*
		 * When the JTLB is updated by tlbwi or tlbwr, a subsequent
		 * use of the JTLB for instructions should not occur for 4
		 * cpu cycles and use for data translations should not occur
		 * for 3 cpu cycles.
		 */
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		tlbw(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		uasm_i_ssnop(p);
		break;

	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4181:
	case CPU_VR4181A:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		uasm_i_nop(p);
		uasm_i_nop(p);
		break;

	case CPU_VR4131:
	case CPU_VR4133:
	case CPU_R5432:
		uasm_i_nop(p);
		uasm_i_nop(p);
		tlbw(p);
		break;

	case CPU_JZRISC:
		tlbw(p);
		uasm_i_nop(p);
		break;

	default:
		panic("No TLB refill handler yet (CPU type: %d)",
		      current_cpu_data.cputype);
		break;
	}
}

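/*
 * Convert a software PTE to a hardware EntryLo value.  With SmartMIPS
 * RIXI the shift-plus-rotate pair parks the global bit at EntryLo
 * bit 0 while wrapping the no-exec protection bit around into the
 * upper (XI) position; without RIXI a plain right shift by the
 * global bit's position is enough.  (A descriptive sketch; the exact
 * layout follows from the _PAGE_* definitions.)
 */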
static __cpuinit __maybe_unused void build_convert_pte_to_entrylo(u32 **p,
								  unsigned int reg)
{
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, reg, reg, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
#ifdef CONFIG_64BIT_PHYS_ADDR
		uasm_i_dsrl_safe(p, reg, reg, ilog2(_PAGE_GLOBAL));
#else
		UASM_i_SRL(p, reg, reg, ilog2(_PAGE_GLOBAL));
#endif
	}
}

#ifdef CONFIG_HUGETLB_PAGE

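/*
 * Note the delay-slot idiom below: each uasm_il_b() is immediately
 * followed by the mtc0 that restores c0_pagemask, so the restore is
 * executed from the branch delay slot on the way out to 'lid'.
 */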
static __cpuinit void build_restore_pagemask(u32 **p,
					     struct uasm_reloc **r,
					     unsigned int tmp,
					     enum label_id lid)
{
	/* Reset default page size */
	if (PM_DEFAULT_MASK >> 16) {
		uasm_i_lui(p, tmp, PM_DEFAULT_MASK >> 16);
		uasm_i_ori(p, tmp, tmp, PM_DEFAULT_MASK & 0xffff);
		uasm_il_b(p, r, lid);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else if (PM_DEFAULT_MASK) {
		uasm_i_ori(p, tmp, 0, PM_DEFAULT_MASK);
		uasm_il_b(p, r, lid);
		uasm_i_mtc0(p, tmp, C0_PAGEMASK);
	} else {
		uasm_il_b(p, r, lid);
		uasm_i_mtc0(p, 0, C0_PAGEMASK);
	}
}

static __cpuinit void build_huge_tlb_write_entry(u32 **p,
						 struct uasm_label **l,
						 struct uasm_reloc **r,
						 unsigned int tmp,
						 enum tlb_write_entry wmode)
{
	/* Set huge page tlb entry size */
	uasm_i_lui(p, tmp, PM_HUGE_MASK >> 16);
	uasm_i_ori(p, tmp, tmp, PM_HUGE_MASK & 0xffff);
	uasm_i_mtc0(p, tmp, C0_PAGEMASK);

	build_tlb_write_entry(p, l, r, wmode);

	build_restore_pagemask(p, r, tmp, label_leave);
}

/*
 * Check if Huge PTE is present, if so then jump to LABEL.
 */
static void __cpuinit
build_is_huge_pte(u32 **p, struct uasm_reloc **r, unsigned int tmp,
		  unsigned int pmd, int lid)
{
	UASM_i_LW(p, tmp, 0, pmd);
	uasm_i_andi(p, tmp, tmp, _PAGE_HUGE);
	uasm_il_bnez(p, r, tmp, lid);
}

static __cpuinit void build_huge_update_entries(u32 **p,
						unsigned int pte,
						unsigned int tmp)
{
	int small_sequence;

	/*
	 * A huge PTE describes an area the size of the configured huge
	 * page size. This is twice the size of the large TLB entry we
	 * intend to use.  A TLB entry half the size of the configured
	 * huge page size is configured into entrylo0 and entrylo1 to
	 * cover the contiguous huge PTE address space.
	 */
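	/*
	 * (HPAGE_SIZE >> 7) is the EntryLo distance between the two
	 * halves of the huge page; when it fits in a 16-bit immediate,
	 * a single ADDIU below derives entrylo1 from entrylo0.
	 */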
	small_sequence = (HPAGE_SIZE >> 7) < 0x10000;

	/* We can clobber tmp.  It isn't used after this. */
	if (!small_sequence)
		uasm_i_lui(p, tmp, HPAGE_SIZE >> (7 + 16));

	build_convert_pte_to_entrylo(p, pte);
	UASM_i_MTC0(p, pte, C0_ENTRYLO0); /* load it */
	/* convert to entrylo1 */
	if (small_sequence)
		UASM_i_ADDIU(p, pte, pte, HPAGE_SIZE >> 7);
	else
		UASM_i_ADDU(p, pte, pte, tmp);

	UASM_i_MTC0(p, pte, C0_ENTRYLO1); /* load it */
}

static __cpuinit void build_huge_handler_tail(u32 **p,
					      struct uasm_reloc **r,
					      struct uasm_label **l,
					      unsigned int pte,
					      unsigned int ptr)
{
#ifdef CONFIG_SMP
	UASM_i_SC(p, pte, 0, ptr);
	uasm_il_beqz(p, r, pte, label_tlb_huge_update);
	UASM_i_LW(p, pte, 0, ptr); /* Needed because SC killed our PTE */
#else
	UASM_i_SW(p, pte, 0, ptr);
#endif
	build_huge_update_entries(p, pte, ptr);
	build_huge_tlb_write_entry(p, l, r, pte, tlb_indexed);
}
#endif /* CONFIG_HUGETLB_PAGE */

#ifdef CONFIG_64BIT
/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pmd entry.
 */
static void __cpuinit
build_get_pmde64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
		 unsigned int tmp, unsigned int ptr)
{
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
	long pgdc = (long)pgd_current;
#endif
	/*
	 * The vmalloc handling is not in the hotpath.
	 */
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);

	if (check_for_high_segbits) {
		/*
		 * The kernel currently implicitly assumes that the
		 * MIPS SEGBITS parameter for the processor is
		 * (PGDIR_SHIFT+PGDIR_BITS) or less, and will never
		 * allocate virtual addresses outside the maximum
		 * range for SEGBITS = (PGDIR_SHIFT+PGDIR_BITS). But
		 * that doesn't prevent user code from accessing the
		 * higher xuseg addresses.  Here, we make sure that
		 * everything but the lower xuseg addresses goes down
		 * the module_alloc/vmalloc path.
		 */
		uasm_i_dsrl_safe(p, ptr, tmp, PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
		uasm_il_bnez(p, r, ptr, label_vmalloc);
	} else {
		uasm_il_bltz(p, r, tmp, label_vmalloc);
	}
	/* No uasm_i_nop needed here, since the next insn doesn't touch TMP. */

#ifdef CONFIG_MIPS_PGD_C0_CONTEXT
	/*
	 * &pgd << 11 stored in CONTEXT [23..63].
	 */
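	/*
	 * Recover the pgd pointer: clear the low 23 bits, OR in a bit
	 * pattern that the rotate below turns into the xkphys cached
	 * address prefix, then rotate right by 11 to undo the << 11
	 * applied when the pgd was stored into c0_context at context
	 * switch.  (A sketch of the intent; the encoding is in the
	 * comments on the instructions themselves.)
	 */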
	UASM_i_MFC0(p, ptr, C0_CONTEXT);
	uasm_i_dins(p, ptr, 0, 0, 23); /* Clear lower 23 bits of context. */
	uasm_i_ori(p, ptr, ptr, 0x540); /* 1 0 1 0 1 << 6 xkphys cached */
	uasm_i_drotr(p, ptr, ptr, 11);
#elif defined(CONFIG_SMP)
# ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	uasm_i_dsrl_safe(p, ptr, ptr, 19);
# else
	/*
	 * 64 bit SMP running in XKPHYS has smp_processor_id() << 3
	 * stored in CONTEXT.
	 */
	uasm_i_dmfc0(p, ptr, C0_CONTEXT);
	uasm_i_dsrl_safe(p, ptr, ptr, 23);
# endif
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_daddu(p, ptr, ptr, tmp);
	uasm_i_dmfc0(p, tmp, C0_BADVADDR);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
	uasm_i_ld(p, ptr, uasm_rel_lo(pgdc), ptr);
#endif

	uasm_l_vmalloc_done(l, *p);

	/* get pgd offset in bytes */
	uasm_i_dsrl_safe(p, tmp, tmp, PGDIR_SHIFT - 3);

	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PGD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pgd offset */
#ifndef __PAGETABLE_PMD_FOLDED
	uasm_i_dmfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_ld(p, ptr, 0, ptr); /* get pmd pointer */
	uasm_i_dsrl_safe(p, tmp, tmp, PMD_SHIFT - 3); /* get pmd offset in bytes */
	uasm_i_andi(p, tmp, tmp, (PTRS_PER_PMD - 1) << 3);
	uasm_i_daddu(p, ptr, ptr, tmp); /* add in pmd offset */
#endif
}

enum vmalloc64_mode {not_refill, refill};
/*
 * BVADDR is the faulting address, PTR is scratch.
 * PTR will hold the pgd for vmalloc.
 */
static void __cpuinit
build_get_pgd_vmalloc64(u32 **p, struct uasm_label **l, struct uasm_reloc **r,
			unsigned int bvaddr, unsigned int ptr,
			enum vmalloc64_mode mode)
{
	long swpd = (long)swapper_pg_dir;
	int single_insn_swpd;
	int did_vmalloc_branch = 0;

	single_insn_swpd = uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd);

	uasm_l_vmalloc(l, *p);

	if (mode == refill && check_for_high_segbits) {
		if (single_insn_swpd) {
			uasm_il_bltz(p, r, bvaddr, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
			did_vmalloc_branch = 1;
			/* fall through */
		} else {
			uasm_il_bgez(p, r, bvaddr, label_large_segbits_fault);
		}
	}
	if (!did_vmalloc_branch) {
		if (uasm_in_compat_space_p(swpd) && !uasm_rel_lo(swpd)) {
			uasm_il_b(p, r, label_vmalloc_done);
			uasm_i_lui(p, ptr, uasm_rel_hi(swpd));
		} else {
			UASM_i_LA_mostly(p, ptr, swpd);
			uasm_il_b(p, r, label_vmalloc_done);
			if (uasm_in_compat_space_p(swpd))
				uasm_i_addiu(p, ptr, ptr, uasm_rel_lo(swpd));
			else
				uasm_i_daddiu(p, ptr, ptr, uasm_rel_lo(swpd));
		}
	}
	if (mode == refill && check_for_high_segbits) {
		uasm_l_large_segbits_fault(l, *p);
		/*
		 * We get here if we are an xsseg address, or if we are
		 * an xuseg address above the (PGDIR_SHIFT+PGDIR_BITS) boundary.
		 *
		 * Ignoring xsseg (assume it is disabled, so it would
		 * generate address errors), the only remaining
		 * possibility is the upper xuseg addresses.  On
		 * processors with TLB_SEGBITS <= PGDIR_SHIFT+PGDIR_BITS,
		 * these addresses would have taken an address error.  We
		 * try to mimic that here by taking a load/istream page
		 * fault.
		 */
		UASM_i_LA(p, ptr, (unsigned long)tlb_do_page_fault_0);
		uasm_i_jr(p, ptr);
		uasm_i_nop(p);
	}
}

#else /* !CONFIG_64BIT */

/*
 * TMP and PTR are scratch.
 * TMP will be clobbered, PTR will hold the pgd entry.
 */
static void __cpuinit __maybe_unused
build_get_pgde32(u32 **p, unsigned int tmp, unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	/* 32 bit SMP has smp_processor_id() stored in CONTEXT. */
#ifdef CONFIG_SMP
#ifdef CONFIG_MIPS_MT_SMTC
	/*
	 * SMTC uses TCBind value as "CPU" index
	 */
	uasm_i_mfc0(p, ptr, C0_TCBIND);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 19);
#else
	/*
	 * smp_processor_id() << 3 is stored in CONTEXT.
	 */
	uasm_i_mfc0(p, ptr, C0_CONTEXT);
	UASM_i_LA_mostly(p, tmp, pgdc);
	uasm_i_srl(p, ptr, ptr, 23);
#endif
	uasm_i_addu(p, ptr, tmp, ptr);
#else
	UASM_i_LA_mostly(p, ptr, pgdc);
#endif
	uasm_i_mfc0(p, tmp, C0_BADVADDR); /* get faulting address */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, tmp, tmp, PGDIR_SHIFT); /* get pgd only bits */
	uasm_i_sll(p, tmp, tmp, PGD_T_LOG2);
	uasm_i_addu(p, ptr, ptr, tmp); /* add in pgd offset */
}

#endif /* !CONFIG_64BIT */

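/*
 * c0_context/c0_xcontext hold the faulting BadVPN2 starting at bit 4;
 * build_adjust_context() scales that into the byte offset of an
 * even/odd pte pair and masks it to the size of the pte array.
 */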
static void __cpuinit build_adjust_context(u32 **p, unsigned int ctx)
{
	unsigned int shift = 4 - (PTE_T_LOG2 + 1) + PAGE_SHIFT - 12;
	unsigned int mask = (PTRS_PER_PTE / 2 - 1) << (PTE_T_LOG2 + 1);

	switch (current_cpu_type()) {
	case CPU_VR41XX:
	case CPU_VR4111:
	case CPU_VR4121:
	case CPU_VR4122:
	case CPU_VR4131:
	case CPU_VR4181:
	case CPU_VR4181A:
	case CPU_VR4133:
		shift += 2;
		break;

	default:
		break;
	}

	if (shift)
		UASM_i_SRL(p, ctx, ctx, shift);
	uasm_i_andi(p, ctx, ctx, mask);
}

static void __cpuinit build_get_ptep(u32 **p, unsigned int tmp, unsigned int ptr)
{
	/*
	 * Bug workaround for the Nevada. It seems as if under certain
	 * circumstances the move from cp0_context might produce a
	 * bogus result when the mfc0 instruction and its consumer are
	 * in a different cacheline or a load instruction, probably any
	 * memory reference, is between them.
	 */
	switch (current_cpu_type()) {
	case CPU_NEVADA:
		UASM_i_LW(p, ptr, 0, ptr);
		GET_CONTEXT(p, tmp); /* get context reg */
		break;

	default:
		GET_CONTEXT(p, tmp); /* get context reg */
		UASM_i_LW(p, ptr, 0, ptr);
		break;
	}

	build_adjust_context(p, tmp);
	UASM_i_ADDU(p, ptr, ptr, tmp); /* add in offset */
}

static void __cpuinit build_update_entries(u32 **p, unsigned int tmp,
					   unsigned int ptep)
{
	/*
	 * 64bit address support (36bit on a 32bit CPU) in a 32bit
	 * Kernel is a special case. Only a few CPUs use it.
	 */
#ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits) {
		uasm_i_ld(p, tmp, 0, ptep); /* get even pte */
		uasm_i_ld(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
		if (kernel_uses_smartmips_rixi) {
			UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
			UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
			UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		} else {
			uasm_i_dsrl_safe(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
			UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
			uasm_i_dsrl_safe(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		}
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	} else {
		int pte_off_even = sizeof(pte_t) / 2;
		int pte_off_odd = pte_off_even + sizeof(pte_t);

		/* The pte entries are pre-shifted */
		uasm_i_lw(p, tmp, pte_off_even, ptep); /* get even pte */
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		uasm_i_lw(p, ptep, pte_off_odd, ptep); /* get odd pte */
		UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
	}
#else
	UASM_i_LW(p, tmp, 0, ptep); /* get even pte */
	UASM_i_LW(p, ptep, sizeof(pte_t), ptep); /* get odd pte */
	if (r45k_bvahwbug())
		build_tlb_probe_entry(p);
	if (kernel_uses_smartmips_rixi) {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_NO_EXEC));
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_NO_EXEC));
		UASM_i_ROTR(p, tmp, tmp, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_ROTR(p, ptep, ptep, ilog2(_PAGE_GLOBAL) - ilog2(_PAGE_NO_EXEC));
	} else {
		UASM_i_SRL(p, tmp, tmp, ilog2(_PAGE_GLOBAL)); /* convert to entrylo0 */
		if (r4k_250MHZhwbug())
			UASM_i_MTC0(p, 0, C0_ENTRYLO0);
		UASM_i_MTC0(p, tmp, C0_ENTRYLO0); /* load it */
		UASM_i_SRL(p, ptep, ptep, ilog2(_PAGE_GLOBAL)); /* convert to entrylo1 */
		if (r45k_bvahwbug())
			uasm_i_mfc0(p, tmp, C0_INDEX);
	}
	if (r4k_250MHZhwbug())
		UASM_i_MTC0(p, 0, C0_ENTRYLO1);
	UASM_i_MTC0(p, ptep, C0_ENTRYLO1); /* load it */
#endif
}

/*
 * For a 64-bit kernel, we are using the 64-bit XTLB refill exception
 * because EXL == 0.  If we wrap, we can also use the 32 instruction
 * slots before the XTLB refill exception handler which belong to the
 * unused TLB refill exception.
 */
#define MIPS64_REFILL_INSNS 32

static void __cpuinit build_r4000_tlb_refill_handler(void)
{
	u32 *p = tlb_handler;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;
	u32 *f;
	unsigned int final_len;

	memset(tlb_handler, 0, sizeof(tlb_handler));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));
	memset(final_handler, 0, sizeof(final_handler));

	/*
	 * create the plain linear handler
	 */
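	/*
	 * BCM1250 M3 workaround: compare the VPN bits of c0_badvaddr
	 * and c0_entryhi; if they disagree the reported fault address
	 * is assumed bogus and the handler bails out through
	 * label_leave with a plain eret.
	 */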
	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

#ifdef CONFIG_64BIT
	build_get_pmde64(&p, &l, &r, K0, K1); /* get pmd in K1 */
#else
	build_get_pgde32(&p, K0, K1); /* get pgd in K1 */
#endif

#ifdef CONFIG_HUGETLB_PAGE
	build_is_huge_pte(&p, &r, K0, K1, label_tlb_huge_update);
#endif

	build_get_ptep(&p, K0, K1);
	build_update_entries(&p, K0, K1);
	build_tlb_write_entry(&p, &l, &r, tlb_random);
	uasm_l_leave(&l, p);
	uasm_i_eret(&p); /* return from trap */

#ifdef CONFIG_HUGETLB_PAGE
	uasm_l_tlb_huge_update(&l, p);
	UASM_i_LW(&p, K0, 0, K1);
	build_huge_update_entries(&p, K0, K1);
	build_huge_tlb_write_entry(&p, &l, &r, K0, tlb_random);
#endif

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(&p, &l, &r, K0, K1, refill);
#endif

	/*
	 * Overflow check: For the 64bit handler, we need at least one
	 * free instruction slot for the wrap-around branch. In worst
	 * case, if the intended insertion point is a delay slot, we
	 * need three, with the second nop'ed and the third being
	 * unused.
	 */
	/* Loongson2 ebase is different than r4k, we have more space */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	if ((p - tlb_handler) > 64)
		panic("TLB refill handler space exceeded");
#else
	if (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 1)
	    || (((p - tlb_handler) > (MIPS64_REFILL_INSNS * 2) - 3)
		&& uasm_insn_has_bdelay(relocs,
					tlb_handler + MIPS64_REFILL_INSNS - 3)))
		panic("TLB refill handler space exceeded");
#endif

	/*
	 * Now fold the handler in the TLB refill handler space.
	 */
#if defined(CONFIG_32BIT) || defined(CONFIG_CPU_LOONGSON2)
	f = final_handler;
	/* Simplest case, just copy the handler. */
	uasm_copy_handler(relocs, labels, tlb_handler, p, f);
	final_len = p - tlb_handler;
#else /* CONFIG_64BIT */
	f = final_handler + MIPS64_REFILL_INSNS;
	if ((p - tlb_handler) <= MIPS64_REFILL_INSNS) {
		/* Just copy the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, p, f);
		final_len = p - tlb_handler;
	} else {
#if defined(CONFIG_HUGETLB_PAGE)
		const enum label_id ls = label_tlb_huge_update;
#else
		const enum label_id ls = label_vmalloc;
#endif
		u32 *split;
		int ov = 0;
		int i;

		for (i = 0; i < ARRAY_SIZE(labels) && labels[i].lab != ls; i++)
			;
		BUG_ON(i == ARRAY_SIZE(labels));
		split = labels[i].addr;

		/*
		 * See if we have overflown one way or the other.
		 */
		if (split > tlb_handler + MIPS64_REFILL_INSNS ||
		    split < p - MIPS64_REFILL_INSNS)
			ov = 1;

		if (ov) {
			/*
			 * Split two instructions before the end.  One
			 * for the branch and one for the instruction
			 * in the delay slot.
			 */
			split = tlb_handler + MIPS64_REFILL_INSNS - 2;

			/*
			 * If the branch would fall in a delay slot,
			 * we must back up an additional instruction
			 * so that it is no longer in a delay slot.
			 */
			if (uasm_insn_has_bdelay(relocs, split - 1))
				split--;
		}
		/* Copy first part of the handler. */
		uasm_copy_handler(relocs, labels, tlb_handler, split, f);
		f += split - tlb_handler;

		if (ov) {
			/* Insert branch. */
			uasm_l_split(&l, final_handler);
			uasm_il_b(&f, &r, label_split);
			if (uasm_insn_has_bdelay(relocs, split))
				uasm_i_nop(&f);
			else {
				uasm_copy_handler(relocs, labels,
						  split, split + 1, f);
				uasm_move_labels(labels, f, f + 1, -1);
				f++;
				split++;
			}
		}

		/* Copy the rest of the handler. */
		uasm_copy_handler(relocs, labels, split, p, final_handler);
		final_len = (f - (final_handler + MIPS64_REFILL_INSNS)) +
			    (p - split);
	}
#endif /* CONFIG_64BIT */

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB refill handler (%u instructions).\n",
		 final_len);

	memcpy((void *)ebase, final_handler, 0x100);

	dump_handler((u32 *)ebase, 64);
}

/*
 * 128 instructions for the fastpath handler is generous and should
 * never be exceeded.
 */
#define FASTPATH_SIZE 128

u32 handle_tlbl[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbs[FASTPATH_SIZE] __cacheline_aligned;
u32 handle_tlbm[FASTPATH_SIZE] __cacheline_aligned;

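/*
 * Reload the PTE from the page table.  Under SMP this uses a linked
 * load (ll/lld) so that the matching conditional store in iPTE_SW()
 * can detect a racing update of the page table entry.
 */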
static void __cpuinit
iPTE_LW(u32 **p, unsigned int pte, unsigned int ptr)
{
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_lld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LL(p, pte, 0, ptr);
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_ld(p, pte, 0, ptr);
	else
# endif
		UASM_i_LW(p, pte, 0, ptr);
#endif
}

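/*
 * Store the updated PTE back.  The SMP variant branches back to
 * label_smp_pgtable_change when the sc/scd fails, i.e. when another
 * CPU modified the pte between our linked load and the store.
 */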
static void __cpuinit
iPTE_SW(u32 **p, struct uasm_reloc **r, unsigned int pte, unsigned int ptr,
	unsigned int mode)
{
#ifdef CONFIG_64BIT_PHYS_ADDR
	unsigned int hwmode = mode & (_PAGE_VALID | _PAGE_DIRTY);
#endif

	uasm_i_ori(p, pte, pte, mode);
#ifdef CONFIG_SMP
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_scd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SC(p, pte, 0, ptr);

	if (r10000_llsc_war())
		uasm_il_beqzl(p, r, pte, label_smp_pgtable_change);
	else
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		/* no uasm_i_nop needed */
		uasm_i_ll(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sc(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_il_beqz(p, r, pte, label_smp_pgtable_change);
		/* no uasm_i_nop needed */
		uasm_i_lw(p, pte, 0, ptr);
	} else
		uasm_i_nop(p);
# else
	uasm_i_nop(p);
# endif
#else
# ifdef CONFIG_64BIT_PHYS_ADDR
	if (cpu_has_64bits)
		uasm_i_sd(p, pte, 0, ptr);
	else
# endif
		UASM_i_SW(p, pte, 0, ptr);

# ifdef CONFIG_64BIT_PHYS_ADDR
	if (!cpu_has_64bits) {
		uasm_i_lw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_ori(p, pte, pte, hwmode);
		uasm_i_sw(p, pte, sizeof(pte_t) / 2, ptr);
		uasm_i_lw(p, pte, 0, ptr);
	}
# endif
#endif
}

/*
 * Check if PTE is present, if not then jump to LABEL. PTR points to
 * the page table where this PTE is located, PTE will be re-loaded
 * with its original value.
 */
static void __cpuinit
build_pte_present(u32 **p, struct uasm_reloc **r,
		  unsigned int pte, unsigned int ptr, enum label_id lid)
{
	if (kernel_uses_smartmips_rixi) {
		uasm_i_andi(p, pte, pte, _PAGE_PRESENT);
		uasm_il_beqz(p, r, pte, lid);
	} else {
		uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_READ);
		uasm_il_bnez(p, r, pte, lid);
	}
	iPTE_LW(p, pte, ptr);
}

/* Make PTE valid, store result in PTR. */
static void __cpuinit
build_make_valid(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = _PAGE_VALID | _PAGE_ACCESSED;

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be written to, if not branch to LABEL.  Regardless,
 * restore PTE with the value at PTR when done.
 */
static void __cpuinit
build_pte_writable(u32 **p, struct uasm_reloc **r,
		   unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_i_xori(p, pte, pte, _PAGE_PRESENT | _PAGE_WRITE);
	uasm_il_bnez(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}

/* Make PTE writable, update software status bits as well, then store
 * at PTR.
 */
static void __cpuinit
build_make_write(u32 **p, struct uasm_reloc **r, unsigned int pte,
		 unsigned int ptr)
{
	unsigned int mode = (_PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID
			     | _PAGE_DIRTY);

	iPTE_SW(p, r, pte, ptr, mode);
}

/*
 * Check if PTE can be modified, if not branch to LABEL.  Regardless,
 * restore PTE with the value at PTR when done.
 */
static void __cpuinit
build_pte_modifiable(u32 **p, struct uasm_reloc **r,
		     unsigned int pte, unsigned int ptr, enum label_id lid)
{
	uasm_i_andi(p, pte, pte, _PAGE_WRITE);
	uasm_il_beqz(p, r, pte, lid);
	iPTE_LW(p, pte, ptr);
}

#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
/*
 * R3000 style TLB load/store/modify handlers.
 */

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi.
 * Then it returns.
 */
static void __cpuinit
build_r3000_pte_reload_tlbwi(u32 **p, unsigned int pte, unsigned int tmp)
{
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* cp0 delay */
	uasm_i_tlbwi(p);
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

/*
 * This places the pte into ENTRYLO0 and writes it with tlbwi
 * or tlbwr as appropriate.  This is because the index register
 * may have the probe fail bit set as a result of a trap on a
 * kseg2 access, i.e. without refill.  Then it returns.
 */
static void __cpuinit
build_r3000_tlb_reload_write(u32 **p, struct uasm_label **l,
			     struct uasm_reloc **r, unsigned int pte,
			     unsigned int tmp)
{
	uasm_i_mfc0(p, tmp, C0_INDEX);
	uasm_i_mtc0(p, pte, C0_ENTRYLO0); /* cp0 delay */
	uasm_il_bltz(p, r, tmp, label_r3000_write_probe_fail); /* cp0 delay */
	uasm_i_mfc0(p, tmp, C0_EPC); /* branch delay */
	uasm_i_tlbwi(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
	uasm_l_r3000_write_probe_fail(l, *p);
	uasm_i_tlbwr(p); /* cp0 delay */
	uasm_i_jr(p, tmp);
	uasm_i_rfe(p); /* branch delay */
}

static void __cpuinit
build_r3000_tlbchange_handler_head(u32 **p, unsigned int pte,
				   unsigned int ptr)
{
	long pgdc = (long)pgd_current;

	uasm_i_mfc0(p, pte, C0_BADVADDR);
	uasm_i_lui(p, ptr, uasm_rel_hi(pgdc)); /* cp0 delay */
	uasm_i_lw(p, ptr, uasm_rel_lo(pgdc), ptr);
	uasm_i_srl(p, pte, pte, 22); /* load delay */
	uasm_i_sll(p, pte, pte, 2);
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_mfc0(p, pte, C0_CONTEXT);
	uasm_i_lw(p, ptr, 0, ptr); /* cp0 delay */
	uasm_i_andi(p, pte, pte, 0xffc); /* load delay */
	uasm_i_addu(p, ptr, ptr, pte);
	uasm_i_lw(p, pte, 0, ptr);
	uasm_i_tlbp(p); /* load delay */
}

static void __cpuinit build_r3000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	uasm_i_nop(&p); /* load delay */
	build_make_valid(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r3000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r3000_tlbchange_handler_head(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	uasm_i_nop(&p); /* load delay */
	build_make_write(&p, &r, K0, K1);
	build_r3000_tlb_reload_write(&p, &l, &r, K0, K1);

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

Ralf Baechle234fcd12008-03-08 09:56:28 +00001286static void __cpuinit build_r3000_tlb_modify_handler(void)
Linus Torvalds1da177e2005-04-16 15:20:36 -07001287{
1288 u32 *p = handle_tlbm;
Thiemo Seufere30ec452008-01-28 20:05:38 +00001289 struct uasm_label *l = labels;
1290 struct uasm_reloc *r = relocs;
Linus Torvalds1da177e2005-04-16 15:20:36 -07001291
1292 memset(handle_tlbm, 0, sizeof(handle_tlbm));
1293 memset(labels, 0, sizeof(labels));
1294 memset(relocs, 0, sizeof(relocs));
1295
1296 build_r3000_tlbchange_handler_head(&p, K0, K1);
David Daneybd1437e2009-05-08 15:10:50 -07001297 build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
Thiemo Seufere30ec452008-01-28 20:05:38 +00001298 uasm_i_nop(&p); /* load delay */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001299 build_make_write(&p, &r, K0, K1);
Maciej W. Rozyckifded2e52005-06-13 20:24:00 +00001300 build_r3000_pte_reload_tlbwi(&p, K0, K1);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001301
Thiemo Seufere30ec452008-01-28 20:05:38 +00001302 uasm_l_nopage_tlbm(&l, p);
1303 uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
1304 uasm_i_nop(&p);
Linus Torvalds1da177e2005-04-16 15:20:36 -07001305
1306 if ((p - handle_tlbm) > FASTPATH_SIZE)
1307 panic("TLB modify handler fastpath space exceeded");
1308
Thiemo Seufere30ec452008-01-28 20:05:38 +00001309 uasm_resolve_relocs(relocs, labels);
1310 pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
1311 (unsigned int)(p - handle_tlbm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001312
Franck Bui-Huu92b1e6a2007-10-18 09:11:17 +02001313 dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
Linus Torvalds1da177e2005-04-16 15:20:36 -07001314}
David Daney826222842009-10-14 12:16:56 -07001315#endif /* CONFIG_MIPS_PGD_C0_CONTEXT */
Linus Torvalds1da177e2005-04-16 15:20:36 -07001316
1317/*
1318 * R4000 style TLB load/store/modify handlers.
1319 */
Ralf Baechle234fcd12008-03-08 09:56:28 +00001320static void __cpuinit
Thiemo Seufere30ec452008-01-28 20:05:38 +00001321build_r4000_tlbchange_handler_head(u32 **p, struct uasm_label **l,
1322 struct uasm_reloc **r, unsigned int pte,
Linus Torvalds1da177e2005-04-16 15:20:36 -07001323 unsigned int ptr)
1324{
Ralf Baechle875d43e2005-09-03 15:56:16 -07001325#ifdef CONFIG_64BIT
Linus Torvalds1da177e2005-04-16 15:20:36 -07001326 build_get_pmde64(p, l, r, pte, ptr); /* get pmd in ptr */
1327#else
1328 build_get_pgde32(p, pte, ptr); /* get pgd in ptr */
1329#endif
1330
David Daneyfd062c82009-05-27 17:47:44 -07001331#ifdef CONFIG_HUGETLB_PAGE
1332 /*
1333 * For huge tlb entries, pmd doesn't contain an address but
1334 * instead contains the tlb pte. Check the PAGE_HUGE bit and
1335 * see if we need to jump to huge tlb processing.
1336 */
1337 build_is_huge_pte(p, r, pte, ptr, label_tlb_huge_update);
1338#endif
1339
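	/*
	 * Turn BadVAddr into the pte's byte offset inside the page
	 * table (one combined shift plus a mask over the PTRS_PER_PTE
	 * entries) and add it to the table base loaded above.
	 */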
	UASM_i_MFC0(p, pte, C0_BADVADDR);
	UASM_i_LW(p, ptr, 0, ptr);
	UASM_i_SRL(p, pte, pte, PAGE_SHIFT + PTE_ORDER - PTE_T_LOG2);
	uasm_i_andi(p, pte, pte, (PTRS_PER_PTE - 1) << PTE_T_LOG2);
	UASM_i_ADDU(p, ptr, ptr, pte);

#ifdef CONFIG_SMP
	uasm_l_smp_pgtable_change(l, *p);
#endif
	iPTE_LW(p, pte, ptr); /* get even pte */
	if (!m4kc_tlbp_war())
		build_tlb_probe_entry(p);
}

static void __cpuinit
build_r4000_tlbchange_handler_tail(u32 **p, struct uasm_label **l,
				   struct uasm_reloc **r, unsigned int tmp,
				   unsigned int ptr)
{
	uasm_i_ori(p, ptr, ptr, sizeof(pte_t));
	uasm_i_xori(p, ptr, ptr, sizeof(pte_t));
	build_update_entries(p, tmp, ptr);
	build_tlb_write_entry(p, l, r, tlb_indexed);
	uasm_l_leave(l, *p);
	uasm_i_eret(p); /* return from trap */

#ifdef CONFIG_64BIT
	build_get_pgd_vmalloc64(p, l, r, tmp, ptr, not_refill);
#endif
}

static void __cpuinit build_r4000_tlb_load_handler(void)
{
	u32 *p = handle_tlbl;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbl, 0, sizeof(handle_tlbl));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

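	/*
	 * BCM1250 "M3" erratum workaround: compare the VPN bits of
	 * BadVAddr and EntryHi and, if they disagree, treat the
	 * exception as spurious and bail out through label_leave
	 * (the eret in the handler tail).
	 */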
	if (bcm1250_m3_war()) {
		unsigned int segbits = 44;

		uasm_i_dmfc0(&p, K0, C0_BADVADDR);
		uasm_i_dmfc0(&p, K1, C0_ENTRYHI);
		uasm_i_xor(&p, K0, K0, K1);
		uasm_i_dsrl_safe(&p, K1, K0, 62);
		uasm_i_dsrl_safe(&p, K0, K0, 12 + 1);
		uasm_i_dsll_safe(&p, K0, K0, 64 + 12 + 1 - segbits);
		uasm_i_or(&p, K0, K0, K1);
		uasm_il_bnez(&p, &r, K0, label_leave);
		/* No need for uasm_i_nop */
	}

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround1);
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
		uasm_i_beqz(&p, K0, 8);
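		/*
		 * The offset of 8 branches past the ENTRYLO1 read; the
		 * ENTRYLO0 read sits in the delay slot and executes on
		 * both paths, an odd ptr simply overwrites it below.
		 */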
		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
		/*
		 * If the EntryLo (now in K0) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		uasm_i_andi(&p, K0, K0, 2);
		uasm_il_bnez(&p, &r, K0, label_nopage_tlbl);

		uasm_l_tlbl_goaround1(&l, p);
		/* Reload the PTE value */
		iPTE_LW(&p, K0, K1);
	}
	build_make_valid(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when build_r4000_tlbchange_handler_head
	 * spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_present(&p, &r, K0, K1, label_nopage_tlbl);
	build_tlb_probe_entry(&p);

	if (kernel_uses_smartmips_rixi) {
		/*
		 * If the page is not _PAGE_VALID, RI or XI could not
		 * have triggered it.  Skip the expensive test.
		 */
		uasm_i_andi(&p, K0, K0, _PAGE_VALID);
		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
		uasm_i_nop(&p);

		uasm_i_tlbr(&p);
		/* Examine entrylo 0 or 1 based on ptr. */
		uasm_i_andi(&p, K0, K1, sizeof(pte_t));
		uasm_i_beqz(&p, K0, 8);

		UASM_i_MFC0(&p, K0, C0_ENTRYLO0); /* load it in the delay slot */
		UASM_i_MFC0(&p, K0, C0_ENTRYLO1); /* load it if ptr is odd */
		/*
		 * If the EntryLo (now in K0) is valid (bit 1), RI or
		 * XI must have triggered it.
		 */
		uasm_i_andi(&p, K0, K0, 2);
		uasm_il_beqz(&p, &r, K0, label_tlbl_goaround2);
		/* Reload the PTE value */
		iPTE_LW(&p, K0, K1);

		/*
		 * We clobbered C0_PAGEMASK, restore it.  On the other
		 * branch it is restored in build_huge_tlb_write_entry.
		 */
		build_restore_pagemask(&p, &r, K0, label_nopage_tlbl);

		uasm_l_tlbl_goaround2(&l, p);
	}
	uasm_i_ori(&p, K0, K0, (_PAGE_ACCESSED | _PAGE_VALID));
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbl(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_0 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbl) > FASTPATH_SIZE)
		panic("TLB load handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB load handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbl));

	dump_handler(handle_tlbl, ARRAY_SIZE(handle_tlbl));
}

static void __cpuinit build_r4000_tlb_store_handler(void)
{
	u32 *p = handle_tlbs;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbs, 0, sizeof(handle_tlbs));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_writable(&p, &r, K0, K1, label_nopage_tlbs);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbs(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbs) > FASTPATH_SIZE)
		panic("TLB store handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB store handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbs));

	dump_handler(handle_tlbs, ARRAY_SIZE(handle_tlbs));
}

static void __cpuinit build_r4000_tlb_modify_handler(void)
{
	u32 *p = handle_tlbm;
	struct uasm_label *l = labels;
	struct uasm_reloc *r = relocs;

	memset(handle_tlbm, 0, sizeof(handle_tlbm));
	memset(labels, 0, sizeof(labels));
	memset(relocs, 0, sizeof(relocs));

	build_r4000_tlbchange_handler_head(&p, &l, &r, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	if (m4kc_tlbp_war())
		build_tlb_probe_entry(&p);
	/* Present and writable bits set, set accessed and dirty bits. */
	build_make_write(&p, &r, K0, K1);
	build_r4000_tlbchange_handler_tail(&p, &l, &r, K0, K1);

#ifdef CONFIG_HUGETLB_PAGE
	/*
	 * This is the entry point when
	 * build_r4000_tlbchange_handler_head spots a huge page.
	 */
	uasm_l_tlb_huge_update(&l, p);
	iPTE_LW(&p, K0, K1);
	build_pte_modifiable(&p, &r, K0, K1, label_nopage_tlbm);
	build_tlb_probe_entry(&p);
	uasm_i_ori(&p, K0, K0,
		   _PAGE_ACCESSED | _PAGE_MODIFIED | _PAGE_VALID | _PAGE_DIRTY);
	build_huge_handler_tail(&p, &r, &l, K0, K1);
#endif

	uasm_l_nopage_tlbm(&l, p);
	uasm_i_j(&p, (unsigned long)tlb_do_page_fault_1 & 0x0fffffff);
	uasm_i_nop(&p);

	if ((p - handle_tlbm) > FASTPATH_SIZE)
		panic("TLB modify handler fastpath space exceeded");

	uasm_resolve_relocs(relocs, labels);
	pr_debug("Wrote TLB modify handler fastpath (%u instructions).\n",
		 (unsigned int)(p - handle_tlbm));

	dump_handler(handle_tlbm, ARRAY_SIZE(handle_tlbm));
}

void __cpuinit build_tlb_refill_handler(void)
{
	/*
	 * The refill handler is generated per-CPU; multi-node systems
	 * may have local storage for it. The other handlers are only
	 * needed once.
	 */
	static int run_once = 0;

#ifdef CONFIG_64BIT
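	/*
	 * True when the CPU implements more virtual address bits than
	 * the page tables can map, so the generated handlers must
	 * catch out-of-range addresses separately.
	 */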
	check_for_high_segbits = current_cpu_data.vmbits > (PGDIR_SHIFT + PGD_ORDER + PAGE_SHIFT - 3);
#endif

	switch (current_cpu_type()) {
	case CPU_R2000:
	case CPU_R3000:
	case CPU_R3000A:
	case CPU_R3081E:
	case CPU_TX3912:
	case CPU_TX3922:
	case CPU_TX3927:
#ifndef CONFIG_MIPS_PGD_C0_CONTEXT
		build_r3000_tlb_refill_handler();
		if (!run_once) {
			build_r3000_tlb_load_handler();
			build_r3000_tlb_store_handler();
			build_r3000_tlb_modify_handler();
			run_once++;
		}
#else
		panic("No R3000 TLB refill handler");
#endif
		break;

	case CPU_R6000:
	case CPU_R6000A:
		panic("No R6000 TLB refill handler yet");
		break;

	case CPU_R8000:
		panic("No R8000 TLB refill handler yet");
		break;

	default:
		build_r4000_tlb_refill_handler();
		if (!run_once) {
			build_r4000_tlb_load_handler();
			build_r4000_tlb_store_handler();
			build_r4000_tlb_modify_handler();
			run_once++;
		}
	}
}

void __cpuinit flush_tlb_handlers(void)
{
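	/*
	 * The handlers were written through the data cache; write them
	 * back to memory and invalidate the icache so the CPU fetches
	 * the freshly generated code.
	 */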
	local_flush_icache_range((unsigned long)handle_tlbl,
				 (unsigned long)handle_tlbl + sizeof(handle_tlbl));
	local_flush_icache_range((unsigned long)handle_tlbs,
				 (unsigned long)handle_tlbs + sizeof(handle_tlbs));
	local_flush_icache_range((unsigned long)handle_tlbm,
				 (unsigned long)handle_tlbm + sizeof(handle_tlbm));
}