/* tlb-miss.S: TLB miss handlers
 *
 * Copyright (C) 2004 Red Hat, Inc. All Rights Reserved.
 * Written by David Howells (dhowells@redhat.com)
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License
 * as published by the Free Software Foundation; either version
 * 2 of the License, or (at your option) any later version.
 */

#include <linux/sys.h>
#include <linux/linkage.h>
#include <asm/page.h>
#include <asm/pgtable.h>
#include <asm/spr-regs.h>

	.section	.text..tlbmiss
	.balign		4

	.globl		__entry_insn_mmu_miss
__entry_insn_mmu_miss:
	break
	nop

	.globl		__entry_insn_mmu_exception
__entry_insn_mmu_exception:
	break
	nop

	.globl		__entry_data_mmu_miss
__entry_data_mmu_miss:
	break
	nop

	.globl		__entry_data_mmu_exception
__entry_data_mmu_exception:
	break
	nop

###############################################################################
#
# handle a lookup failure of one sort or another in a kernel TLB handler
# On entry:
#	GR29	- faulting address
#	SCR2	- saved CCR
#
###############################################################################
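# Both this handler and the user-mode one below first compare PCSR against the
# address of __break_tlb_miss_return_break: a match means the miss was taken
# while single-stepping, so the real return PC/PSR saved in
# __break_tlb_miss_real_return_info have to be reinstated before the fault is
# handed on to the generic MMU fault paths.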
	.type		__tlb_kernel_fault,@function
__tlb_kernel_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31

	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_kernel_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */

	bra		__entry_kernel_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_kernel_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr

	movsg		scr2,gr30
	movgs		gr30,ccr
	movgs		gr29,scr2			/* save EAR0 value */
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi.p		@(gr29,#0),gr29			/* restore GR29 */
	bra		__entry_kernel_handle_mmu_fault_sstep

	.size		__tlb_kernel_fault, .-__tlb_kernel_fault

###############################################################################
#
# handle a lookup failure of one sort or another in a user TLB handler
# On entry:
#	GR28	- faulting address
#	SCR2	- saved CCR
#
###############################################################################
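# Only the CCR saved in SCR2 needs to be put back on this path: the faulting
# address is still in GR28 and GR29 was never used as scratch, so there is
# nothing else to restore before branching to the userspace fault entry points.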
	.type		__tlb_user_fault,@function
__tlb_user_fault:
	# see if we're supposed to re-enable single-step mode upon return
	sethi.p		%hi(__break_tlb_miss_return_break),gr30
	setlo		%lo(__break_tlb_miss_return_break),gr30
	movsg		pcsr,gr31
	subcc		gr31,gr30,gr0,icc0
	beq		icc0,#0,__tlb_user_fault_sstep

	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault

	# we've got to re-enable single-stepping
__tlb_user_fault_sstep:
	sethi.p		%hi(__break_tlb_miss_real_return_info),gr30
	setlo		%lo(__break_tlb_miss_real_return_info),gr30
	lddi		@(gr30,0),gr30
	movgs		gr30,pcsr
	movgs		gr31,psr
	movsg		scr2,gr30
	movgs		gr30,ccr
	bra		__entry_uspace_handle_mmu_fault_sstep

	.size		__tlb_user_fault, .-__tlb_user_fault

###############################################################################
#
# Kernel instruction TLB miss handler
# On entry:
#	GR1	- kernel stack pointer
#	GR28	- saved exception frame pointer
#	GR29	- faulting address
#	GR31	- EAR0 ^ SCR0
#	SCR0	- base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#	DAMR3	- mapped page directory
#	DAMR4	- mapped page table as matched by SCR0
#
###############################################################################
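# Roughly, in C-like pseudocode (a sketch of the flow below, not literal code):
#
#	if ((ear0 ^ scr0) >> 26) {			/* cached PGE doesn't cover ear0 */
#		pge = *(u32 *)(damlr3 + (ear0 >> 26) * 256);	/* walk the PGD */
#		if (!(pge & _PAGE_PRESENT))
#			goto __tlb_kernel_fault;
#		/* superpage PGEs trap to a break instruction: not handled here */
#		dampr4 = pge | xAMPRx_L | xAMPRx_SS_16Kb | xAMPRx_S | xAMPRx_C | xAMPRx_V;
#		scr0 = ear0 & ~0x03ffffff;		/* record coverage base */
#	}
#	pte = *(u32 *)(damlr4 + ((ear0 >> 14) & 0xfff) * 4);	/* index the PTD */
#	if (!(pte & _PAGE_PRESENT))
#		goto __tlb_kernel_fault;
#	pte |= _PAGE_ACCESSED;				/* ...and written back to the PTD */
#	/* punt any valid entry in IAMR1/DAMR1 to the TLB, then load the new
#	 * address/PTE pair into IAMR1/DAMR1 as an extra TLB entry */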
	.globl		__entry_kernel_insn_tlb_miss
	.type		__entry_kernel_insn_tlb_miss,@function
__entry_kernel_insn_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_k_PTD_miss
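	# (GR31 arrived holding EAR0 ^ SCR0, so a zero result from the shift above
	# means EAR0 lies in the same 64MB region as the cached PGE; the srli.p then
	# reloads GR31 with EAR0 >> 12, which is masked with 0x3ffc below to give the
	# word-aligned byte offset of the PTE for EAR0[25:14] within the 16KB PTD)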

__itlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30
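	# (note: _PAGE_ACCESSED is set in the copy of the PTE written back to memory,
	# but is cleared again in the copy kept in GR30 for loading into the xAMPR
	# registers below)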

	# we're using IAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check DAMR1 lest we cause a multiple-DAT-hit exception
	# - IAMPR1 has no WP bit, and we mustn't lose WP information
	movsg		iampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_k_nopunt		/* punt not required */

	movsg		iamlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		iamlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__itlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1
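	# (the same address/protection pair is loaded into both the IAMR1 and DAMR1
	# register sets, so the spare AMR slot acts as one extra TLB entry serving
	# both instruction fetches and data accesses to this page)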

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */
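	# (the conditional branch after rett is never taken; per the comment it is
	# only there to stop the CPU prefetching instructions beyond the return)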

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */
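	# (GR31 is now (EAR0 >> 26) << 8, the byte offset of this address's PGE in
	# the directory mapped by DAMLR3 (each of the 64 PGEs occupies 256 bytes);
	# shifting the same value left by a further 18 bits below yields the
	# 64MB-aligned base address that gets recorded in SCR0)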

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_k_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0
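	# (SCR0 now caches the base of the 64MB region this PTD covers, so the next
	# ITLB miss falling in the same region takes the fast path above and skips
	# the PGD walk entirely)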

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_k_PTD_mapped

__itlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_insn_tlb_miss, .-__entry_kernel_insn_tlb_miss

###############################################################################
#
# Kernel data TLB miss handler
# On entry:
#	GR1	- kernel stack pointer
#	GR28	- saved exception frame pointer
#	GR29	- faulting address
#	GR31	- EAR0 ^ SCR1
#	SCR1	- base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#	DAMR3	- mapped page directory
#	DAMR5	- mapped page table as matched by SCR1
#
###############################################################################
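# This mirrors the instruction-side handler above, except that the cached page
# table mapping lives in DAMR5/DAMLR5 and the coverage base is kept in SCR1;
# the spare IAMR1/DAMR1 slot is refilled in exactly the same way.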
	.globl		__entry_kernel_data_tlb_miss
	.type		__entry_kernel_data_tlb_miss,@function
__entry_kernel_data_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_k_PTD_miss

__dtlb_k_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	# - need to check IAMR1 lest we cause a multiple-DAT-hit exception
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_k_nopunt		/* punt not required */

	movsg		damlr1,gr31
	movgs		gr31,tplr			/* set TPLR.CXN */
	tlbpr		gr31,gr0,#4,#0			/* delete matches from TLB, IAMR1, DAMR1 */

	movsg		dampr1,gr31
	ori		gr31,#xAMPRx_V,gr31		/* entry was invalidated by tlbpr #4 */
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_kernel_fault

__dtlb_k_nopunt:

	# assemble the new TLB entry
	and		gr29,gr31,gr29
	movsg		cxnr,gr31
	or		gr29,gr31,gr29
	movgs		gr29,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr29,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	sethi.p		%hi(__kernel_current_task),gr29
	setlo		%lo(__kernel_current_task),gr29
	ldi		@(gr29,#0),gr29
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_k_PTD_miss:
	srli		gr29,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_kernel_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_k_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr29,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_k_PTD_mapped

__dtlb_k_bigpage:
	break
	nop

	.size		__entry_kernel_data_tlb_miss, .-__entry_kernel_data_tlb_miss

###############################################################################
#
# Userspace instruction TLB miss handler (with PGE prediction)
# On entry:
#	GR28	- faulting address
#	GR31	- EAR0 ^ SCR0
#	SCR0	- base of virtual range covered by cached PGE from last ITLB miss (or 0xffffffff)
#	DAMR3	- mapped page directory
#	DAMR4	- mapped page table as matched by SCR0
#
###############################################################################
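# The "PGE prediction" is the SCR0 cache: if the faulting address falls in the
# same 64MB region as the previous ITLB miss, DAMR4 already maps the right page
# table and the PGD walk is skipped. The faulting address arrives in GR28, and
# GR29 is never used as scratch, so no task-pointer reload is needed on exit.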
	.globl		__entry_user_insn_tlb_miss
	.type		__entry_user_insn_tlb_miss,@function
__entry_user_insn_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__itlb_u_PTD_miss

__itlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR4 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using IAMR1/DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__itlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__itlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__itlb_u_PTD_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__itlb_u_bigpage
	movgs		gr30,dampr4
	movgs		gr31,scr0

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__itlb_u_PTD_mapped

__itlb_u_bigpage:
	break
	nop

	.size		__entry_user_insn_tlb_miss, .-__entry_user_insn_tlb_miss

###############################################################################
#
# Userspace data TLB miss handler
# On entry:
#	GR28	- faulting address
#	GR31	- EAR0 ^ SCR1
#	SCR1	- base of virtual range covered by cached PGE from last DTLB miss (or 0xffffffff)
#	DAMR3	- mapped page directory
#	DAMR5	- mapped page table as matched by SCR1
#
###############################################################################
	.globl		__entry_user_data_tlb_miss
	.type		__entry_user_data_tlb_miss,@function
__entry_user_data_tlb_miss:
#if 0
	sethi.p		%hi(0xe1200004),gr30
	setlo		%lo(0xe1200004),gr30
	st		gr0,@(gr30,gr0)
	sethi.p		%hi(0xffc00100),gr30
	setlo		%lo(0xffc00100),gr30
	sth		gr30,@(gr30,gr0)
	membar
#endif

	movsg		ccr,gr30			/* save CCR */
	movgs		gr30,scr2

	# see if the cached page table mapping is appropriate
	srlicc.p	gr31,#26,gr0,icc0
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_PTD_miss

__dtlb_u_PTD_mapped:
	# access the PTD with EAR0[25:14]
	# - DAMLR5 points to the virtual address of the appropriate page table
	# - the PTD holds 4096 PTEs
	# - the PTD must be accessed uncached
	# - the PTE must be marked accessed if it was valid
	#
	and		gr31,gr30,gr31
	movsg		damlr5,gr30

__dtlb_u_using_iPTD:
	add		gr30,gr31,gr31
	ldi		@(gr31,#0),gr30			/* fetch the PTE */
	andicc		gr30,#_PAGE_PRESENT,gr0,icc0
	ori.p		gr30,#_PAGE_ACCESSED,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PTE invalid */
	sti.p		gr30,@(gr31,#0)			/* update the PTE */
	andi		gr30,#~_PAGE_ACCESSED,gr30

	# we're using DAMR1 as an extra TLB entry
	# - punt the entry here (if valid) to the real TLB and then replace with the new PTE
	movsg		dampr1,gr31
	andicc		gr31,#xAMPRx_V,gr0,icc0
	setlos.p	0xfffff000,gr31
	beq		icc0,#0,__dtlb_u_nopunt		/* punt not required */

	movsg		dampr1,gr31
	movgs		gr31,tppr
	movsg		damlr1,gr31			/* set TPLR.CXN */
	movgs		gr31,tplr
	tlbpr		gr31,gr0,#2,#0			/* save to the TLB */
	movsg		tpxr,gr31			/* check the TLB write error flag */
	andicc.p	gr31,#TPXR_E,gr0,icc0
	setlos		#0xfffff000,gr31
	bne		icc0,#0,__tlb_user_fault

__dtlb_u_nopunt:

	# assemble the new TLB entry
	and		gr28,gr31,gr28
	movsg		cxnr,gr31
	or		gr28,gr31,gr28
	movgs		gr28,iamlr1			/* xAMLR = address | context number */
	movgs		gr30,iampr1
	movgs		gr28,damlr1
	movgs		gr30,dampr1

	# return, restoring registers
	movsg		scr2,gr30
	movgs		gr30,ccr
	rett		#0
	beq		icc0,#3,0			/* prevent icache prefetch */

	# the PTE we want wasn't in the PTD we have mapped, so we need to go looking for a more
	# appropriate page table and map that instead
	# - first of all, check the insn PGE cache - we may well get a hit there
	# - access the PGD with EAR0[31:26]
	# - DAMLR3 points to the virtual address of the page directory
	# - the PGD holds 64 PGEs and each PGE/PME points to a set of page tables
__dtlb_u_PTD_miss:
	movsg		scr0,gr31			/* consult the insn-PGE-cache key */
	xor		gr28,gr31,gr31
	srlicc		gr31,#26,gr0,icc0
	srli		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bne		icc0,#0,__dtlb_u_iPGE_miss

	# what we're looking for is covered by the insn-PGE-cache
	setlos		0x3ffc,gr30
	and		gr31,gr30,gr31
	movsg		damlr4,gr30
	bra		__dtlb_u_using_iPTD
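	# (code and data commonly live in the same 64MB region, so reusing the
	# instruction-side PTD mapping through DAMR4 here avoids a second PGD walk;
	# note that SCR1 and DAMR5 are left untouched on this path)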

__dtlb_u_iPGE_miss:
	srli		gr28,#26,gr31			/* calculate PGE offset */
	slli		gr31,#8,gr31			/* and clear bottom bits */

	movsg		damlr3,gr30
	ld		@(gr31,gr30),gr30		/* access the PGE */

	andicc.p	gr30,#_PAGE_PRESENT,gr0,icc0
	andicc		gr30,#xAMPRx_SS,gr0,icc1

	# map this PTD instead and record coverage address
	ori.p		gr30,#xAMPRx_L|xAMPRx_SS_16Kb|xAMPRx_S|xAMPRx_C|xAMPRx_V,gr30
	beq		icc0,#0,__tlb_user_fault	/* jump if PGE not present */
	slli.p		gr31,#18,gr31
	bne		icc1,#0,__dtlb_u_bigpage
	movgs		gr30,dampr5
	movgs		gr31,scr1

	# we can now resume normal service
	setlos		0x3ffc,gr30
	srli.p		gr28,#12,gr31			/* use EAR0[25:14] as PTE index */
	bra		__dtlb_u_PTD_mapped

__dtlb_u_bigpage:
	break
	nop

	.size		__entry_user_data_tlb_miss, .-__entry_user_data_tlb_miss