/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/highmem.h>
#include <asm/spr-regs.h>

	.section	.text
	.balign		4

	.globl		__entry_insn_mmu_miss
__entry_insn_mmu_miss:
	break
	nop

	.globl		__entry_insn_mmu_exception
__entry_insn_mmu_exception:
	break
	nop

	.globl		__entry_data_mmu_miss
__entry_data_mmu_miss:
	break
	nop

	.globl		__entry_data_mmu_exception
__entry_data_mmu_exception:
	break
	nop

###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# On entry:
#   GR29 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
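# A sketch of the logic below, in C-like pseudocode (hedged paraphrase,
# register plumbing omitted):
#
#	if (pcsr == &__break_tlb_miss_return_break) {
#		/* the miss handler itself was being single-stepped:
#		 * re-arm single-step from the saved return info
#		 * before taking the slow path */
#	}
#	ccr  = scr2;			/* restore condition codes */
#	scr2 = ear0;			/* stash faulting address for C code */
#	gr29 = __kernel_current_task;	/* restore the clobbered register */
#	goto __entry_kernel_handle_mmu_fault;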
	.type		__tlb_kernel_fault,@function
__tlb_kernel_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31

	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_kernel_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */

	bra		__entry_kernel_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */
	bra		__entry_kernel_handle_mmu_fault_sstep

	.size		__tlb_kernel_fault, .-__tlb_kernel_fault

###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#   GR28 - faulting address
#   SCR2 - saved CCR
#
###############################################################################
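# (The user-mode twin of __tlb_kernel_fault above: here only CCR needs
# restoring - there is no GR29/__kernel_current_task reload and EAR0 is not
# stashed in SCR2 before the hand-off to the C fault handler.)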
	.type		__tlb_user_fault,@function
__tlb_user_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31
	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_user_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_user_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr
	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault_sstep

	.size		__tlb_user_fault, .-__tlb_user_fault

###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
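# Rough shape of the fast path below, as hedged C-like pseudocode (a sketch,
# not an authoritative restatement):
#
#	if ((ear0 ^ scr0) >> 26 == 0) {
#		/* same 64MB superblock as last miss: DAMR4 still maps
#		 * the right page table */
#		pte = damlr4[(ear0 >> 14) & 0xfff];
#	} else {
#		pge = damlr3[ear0 >> 26];	/* walk the PGD */
#		if (!(pge & _PAGE_PRESENT))
#			goto __tlb_kernel_fault;
#		if (pge & xAMPRx_SS)
#			goto __itlb_k_bigpage;	/* superpage: unimplemented */
#		dampr4 = pge | AMPR bits;	/* map the new PTD */
#		scr0 = ear0 & ~0x03ffffff;	/* record coverage base */
#		pte = damlr4[(ear0 >> 14) & 0xfff];
#	}
#	if (!(pte & _PAGE_PRESENT))
#		goto __tlb_kernel_fault;
#	punt IAMR1/DAMR1 into the TLB proper, then load the new PTE there.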
	.globl		__entry_kernel_insn_tlb_miss
	.type		__entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_k_PTD_miss

__itlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
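	# Index arithmetic, spelled out (a worked example, not from the
	# original source): each PTE is 4 bytes, so the byte offset into the
	# PTD is EAR0[25:14] << 2, i.e. (EAR0 >> 12) & 0x3ffc, which is what
	# the srli/setlos above and the and below compute.  E.g. for
	# EAR0 = 0xc0012345: (0xc0012345 >> 12) & 0x3ffc = 0x10, PTE #4.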
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check DAMR1 lest we cause a multiple-DAT-hit exception
	# - IAMPR1 has no WP bit, and we mustn't lose WP information
	movsg		iampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_k_nopunt		/* punt not required */

	movsg		iamlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		iamlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault
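	# In outline (a hedged paraphrase of the punt above): if IAMR1 held a
	# valid translation, first purge any copy of it from the TLB proper
	# (tlbpr op #4), then rewrite it into the TLB via TPLR/TPPR (tlbpr
	# op #2) - using DAMPR1's image so WP information survives - and
	# treat a TPXR.E write error as a fault.  Only then are IAMR1/DAMR1
	# free to receive the new PTE.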

__itlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */
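	# (Note on the pair above, inferred from the inline comment: the beq
	# following rett is never actually taken; it sits there only to stop
	# the icache prefetching past the return.  The same idiom recurs at
	# the other handler exits below.)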

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_k_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0
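	# Arithmetic note (inferred from the shifts above): the PGE index is
	# EAR0 >> 26 - one of 64 entries - and each PGE occupies 256 bytes,
	# hence the << 8 to form a byte offset; shifting that a further 18
	# bits left reconstructs index << 26, the base of the 64MB region the
	# new PTD covers, which is what gets cached in SCR0.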

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_k_PTD_mapped

__itlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss

###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#   GR1   - kernel stack pointer
#   GR28  - saved exception frame pointer
#   GR29  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
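# (Structurally a mirror of the insn handler above: the data side keys its
# cached PGE off SCR1 and maps the PTD through DAMR5 rather than SCR0/DAMR4,
# but the walk, punt and reload steps are otherwise identical.)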
	.globl		__entry_kernel_data_tlb_miss
	.type		__entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check IAMR1 lest we cause a multiple-DAT-hit exception
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_k_nopunt		/* punt not required */

	movsg		damlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_k_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_k_PTD_mapped

__dtlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss

###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR0
#   SCR0  - base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR4 - mapped page table as matched by SCR0
#
###############################################################################
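# (Differences from the kernel-mode handlers, as visible in the code below:
# the faulting address arrives in GR28, there is no __kernel_current_task
# reload on exit, and the punt sequence rewrites DAMR1 into the TLB without
# the preceding tlbpr op-#4 purge that the kernel paths perform.)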
	.globl		__entry_user_insn_tlb_miss
	.type		__entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1/DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_u_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_u_PTD_mapped

__itlb_u_bigpage:
	break
	nop

	.size		__entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss

###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#   GR28  - faulting address
#   GR31  - EAR0 ^ SCR1
#   SCR1  - base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#   DAMR3 - mapped page directory
#   DAMR5 - mapped page table as matched by SCR1
#
###############################################################################
	.globl		__entry_user_data_tlb_miss
	.type		__entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30

__dtlb_u_using_iPTD:
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - first of all, check the insn PGE cache - we may well get a hit there
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_u_PTD_miss:
	movsg		scr0,gr31			/* consult the insn-PGE-cache key */
	xor		gr28,gr31,gr31
	srlicc		gr31,#26,gr0,icc0
	srli		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_iPGE_miss

	# what we're looking for is covered by the insn-PGE-cache
	setlos		0x3ffc,gr30
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	bra		__dtlb_u_using_iPTD
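	# (Cross-check borrowed from the insn side: if the faulting data
	# address lies in the 64MB superblock whose PTD the last ITLB miss
	# left mapped through DAMR4/SCR0, reuse that mapping via
	# __dtlb_u_using_iPTD instead of re-walking the PGD.)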

__dtlb_u_iPGE_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_u_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_u_PTD_mapped

__dtlb_u_bigpage:
	break
	nop

	.size		__entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss