Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 1 | /* |
| 2 | * TLB Exception Handling for ARC |
| 3 | * |
| 4 | * Copyright (C) 2004, 2007-2010, 2011-2012 Synopsys, Inc. (www.synopsys.com) |
| 5 | * |
| 6 | * This program is free software; you can redistribute it and/or modify |
| 7 | * it under the terms of the GNU General Public License version 2 as |
| 8 | * published by the Free Software Foundation. |
| 9 | * |
| 10 | * Vineetg: April 2011 : |
 * -MMU v1: moved out legacy code into a separate file
| 12 | * -MMU v3: PD{0,1} bits layout changed: They don't overlap anymore, |
| 13 | * helps avoid a shift when preparing PD0 from PTE |
| 14 | * |
| 15 | * Vineetg: July 2009 |
 * -For MMU V2, we need not do heuristics at the time of committing a D-TLB
| 17 | * entry, so that it doesn't knock out it's I-TLB entry |
| 18 | * -Some more fine tuning: |
| 19 | * bmsk instead of add, asl.cc instead of branch, delay slot utilise etc |
| 20 | * |
| 21 | * Vineetg: July 2009 |
| 22 | * -Practically rewrote the I/D TLB Miss handlers |
 *   Now 40 and 135 instructions apiece as compared to 131 and 449 resp.
| 24 | * Hence Leaner by 1.5 K |
| 25 | * Used Conditional arithmetic to replace excessive branching |
| 26 | * Also used short instructions wherever possible |
| 27 | * |
| 28 | * Vineetg: Aug 13th 2008 |
| 29 | * -Passing ECR (Exception Cause REG) to do_page_fault( ) for printing |
| 30 | * more information in case of a Fatality |
| 31 | * |
| 32 | * Vineetg: March 25th Bug #92690 |
| 33 | * -Added Debug Code to check if sw-ASID == hw-ASID |
| 34 | |
| 35 | * Rahul Trivedi, Amit Bhor: Codito Technologies 2004 |
| 36 | */ |
| 37 | |
| 38 | .cpu A7 |
| 39 | |
| 40 | #include <linux/linkage.h> |
| 41 | #include <asm/entry.h> |
Vineet Gupta | da1677b | 2013-05-14 13:28:17 +0530 | [diff] [blame] | 42 | #include <asm/mmu.h> |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 43 | #include <asm/pgtable.h> |
| 44 | #include <asm/arcregs.h> |
| 45 | #include <asm/cache.h> |
| 46 | #include <asm/processor.h> |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 47 | #include <asm/tlb-mmu1.h> |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 48 | |
Vineet Gupta | 4b06ff3 | 2013-07-10 11:40:27 +0530 | [diff] [blame] | 49 | ;----------------------------------------------------------------- |
| 50 | ; ARC700 Exception Handling doesn't auto-switch stack and it only provides |
| 51 | ; ONE scratch AUX reg "ARC_REG_SCRATCH_DATA0" |
| 52 | ; |
| 53 | ; For Non-SMP, the scratch AUX reg is repurposed to cache task PGD, so a |
| 54 | ; "global" is used to free-up FIRST core reg to be able to code the rest of |
| 55 | ; exception prologue (IRQ auto-disabled on Exceptions, so it's IRQ-safe). |
| 56 | ; Since the Fast Path TLB Miss handler is coded with 4 regs, the remaining 3 |
| 57 | ; need to be saved as well by extending the "global" to be 4 words. Hence |
| 58 | ; ".size ex_saved_reg1, 16" |
| 59 | ; [All of this dance is to avoid stack switching for each TLB Miss, since we |
| 60 | ; only need to save only a handful of regs, as opposed to complete reg file] |
| 61 | ; |
| 62 | ; For ARC700 SMP, the "global" obviously can't be used for free up the FIRST |
| 63 | ; core reg as it will not be SMP safe. |
| 64 | ; Thus scratch AUX reg is used (and no longer used to cache task PGD). |
| 65 | ; To save the rest of 3 regs - per cpu, the global is made "per-cpu". |
| 66 | ; Epilogue thus has to locate the "per-cpu" storage for regs. |
| 67 | ; To avoid cache line bouncing the per-cpu global is aligned/sized per |
| 68 | ; L1_CACHE_SHIFT, despite fundamentally needing to be 12 bytes only. Hence |
| 69 | ; ".size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT)" |
| 70 | |
| 71 | ; As simple as that.... |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 72 | ;-------------------------------------------------------------------------- |
| 73 | |
Vineet Gupta | 4b06ff3 | 2013-07-10 11:40:27 +0530 | [diff] [blame] | 74 | ; scratch memory to save [r0-r3] used to code TLB refill Handler |
Vineet Gupta | 8b5850f | 2013-01-18 15:12:25 +0530 | [diff] [blame] | 75 | ARCFP_DATA ex_saved_reg1 |
Vineet Gupta | 4b06ff3 | 2013-07-10 11:40:27 +0530 | [diff] [blame] | 76 | .align 1 << L1_CACHE_SHIFT |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 77 | .type ex_saved_reg1, @object |
Vineet Gupta | 41195d2 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 78 | #ifdef CONFIG_SMP |
| 79 | .size ex_saved_reg1, (CONFIG_NR_CPUS << L1_CACHE_SHIFT) |
| 80 | ex_saved_reg1: |
| 81 | .zero (CONFIG_NR_CPUS << L1_CACHE_SHIFT) |
| 82 | #else |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 83 | .size ex_saved_reg1, 16 |
| 84 | ex_saved_reg1: |
| 85 | .zero 16 |
Vineet Gupta | 41195d2 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 86 | #endif |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 87 | |
Vineet Gupta | 4b06ff3 | 2013-07-10 11:40:27 +0530 | [diff] [blame] | 88 | .macro TLBMISS_FREEUP_REGS |
| 89 | #ifdef CONFIG_SMP |
| 90 | sr r0, [ARC_REG_SCRATCH_DATA0] ; freeup r0 to code with |
| 91 | GET_CPU_ID r0 ; get to per cpu scratch mem, |
| 92 | lsl r0, r0, L1_CACHE_SHIFT ; cache line wide per cpu |
| 93 | add r0, @ex_saved_reg1, r0 |
| 94 | #else |
| 95 | st r0, [@ex_saved_reg1] |
| 96 | mov_s r0, @ex_saved_reg1 |
| 97 | #endif |
| 98 | st_s r1, [r0, 4] |
| 99 | st_s r2, [r0, 8] |
| 100 | st_s r3, [r0, 12] |
| 101 | |
| 102 | ; VERIFY if the ASID in MMU-PID Reg is same as |
| 103 | ; one in Linux data structures |
| 104 | |
| 105 | DBG_ASID_MISMATCH |
| 106 | .endm |
| 107 | |
| 108 | .macro TLBMISS_RESTORE_REGS |
| 109 | #ifdef CONFIG_SMP |
| 110 | GET_CPU_ID r0 ; get to per cpu scratch mem |
| 111 | lsl r0, r0, L1_CACHE_SHIFT ; each is cache line wide |
| 112 | add r0, @ex_saved_reg1, r0 |
| 113 | ld_s r3, [r0,12] |
| 114 | ld_s r2, [r0, 8] |
| 115 | ld_s r1, [r0, 4] |
| 116 | lr r0, [ARC_REG_SCRATCH_DATA0] |
| 117 | #else |
| 118 | mov_s r0, @ex_saved_reg1 |
| 119 | ld_s r3, [r0,12] |
| 120 | ld_s r2, [r0, 8] |
| 121 | ld_s r1, [r0, 4] |
| 122 | ld_s r0, [r0] |
| 123 | #endif |
| 124 | .endm |
| 125 | |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 126 | ;============================================================================ |
| 127 | ; Troubleshooting Stuff |
| 128 | ;============================================================================ |
| 129 | |
| 130 | ; Linux keeps ASID (Address Space ID) in task->active_mm->context.asid |
| 131 | ; When Creating TLB Entries, instead of doing 3 dependent loads from memory, |
| 132 | ; we use the MMU PID Reg to get current ASID. |
; In bizarre scenarios SW and HW ASID can get out-of-sync which is trouble.
; So we try to detect this in the TLB Miss handler
| 135 | |
| 136 | |
| 137 | .macro DBG_ASID_MISMATCH |
| 138 | |
| 139 | #ifdef CONFIG_ARC_DBG_TLB_PARANOIA |
| 140 | |
| 141 | ; make sure h/w ASID is same as s/w ASID |
| 142 | |
| 143 | GET_CURR_TASK_ON_CPU r3 |
| 144 | ld r0, [r3, TASK_ACT_MM] |
| 145 | ld r0, [r0, MM_CTXT+MM_CTXT_ASID] |
| 146 | |
| 147 | lr r1, [ARC_REG_PID] |
| 148 | and r1, r1, 0xFF |
| 149 | breq r1, r0, 5f |
| 150 | |
| 151 | ; Error if H/w and S/w ASID don't match, but NOT if in kernel mode |
| 152 | lr r0, [erstatus] |
| 153 | bbit0 r0, STATUS_U_BIT, 5f |
| 154 | |
| 155 | ; We sure are in troubled waters, Flag the error, but to do so |
| 156 | ; need to switch to kernel mode stack to call error routine |
| 157 | GET_TSK_STACK_BASE r3, sp |
| 158 | |
| 159 | ; Call printk to shoutout aloud |
| 160 | mov r0, 1 |
| 161 | j print_asid_mismatch |
| 162 | |
| 163 | 5: ; ASIDs match so proceed normally |
| 164 | nop |
| 165 | |
| 166 | #endif |
| 167 | |
| 168 | .endm |
| 169 | |
| 170 | ;============================================================================ |
| 171 | ;TLB Miss handling Code |
| 172 | ;============================================================================ |
| 173 | |
| 174 | ;----------------------------------------------------------------------------- |
| 175 | ; This macro does the page-table lookup for the faulting address. |
| 176 | ; OUT: r0 = PTE faulted on, r1 = ptr to PTE, r2 = Faulting V-address |
| 177 | .macro LOAD_FAULT_PTE |
| 178 | |
| 179 | lr r2, [efa] |
| 180 | |
Vineet Gupta | 41195d2 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 181 | #ifndef CONFIG_SMP |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 182 | lr r1, [ARC_REG_SCRATCH_DATA0] ; current pgd |
Vineet Gupta | 41195d2 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 183 | #else |
| 184 | GET_CURR_TASK_ON_CPU r1 |
| 185 | ld r1, [r1, TASK_ACT_MM] |
| 186 | ld r1, [r1, MM_PGD] |
| 187 | #endif |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 188 | |
| 189 | lsr r0, r2, PGDIR_SHIFT ; Bits for indexing into PGD |
| 190 | ld.as r1, [r1, r0] ; PGD entry corresp to faulting addr |
| 191 | and.f r1, r1, PAGE_MASK ; Ignoring protection and other flags |
| 192 | ; contains Ptr to Page Table |
| 193 | bz.d do_slow_path_pf ; if no Page Table, do page fault |
| 194 | |
| 195 | ; Get the PTE entry: The idea is |
| 196 | ; (1) x = addr >> PAGE_SHIFT -> masks page-off bits from @fault-addr |
| 197 | ; (2) y = x & (PTRS_PER_PTE - 1) -> to get index |
| 198 | ; (3) z = pgtbl[y] |
| 199 | ; To avoid the multiply by in end, we do the -2, <<2 below |
| 200 | |
| 201 | lsr r0, r2, (PAGE_SHIFT - 2) |
| 202 | and r0, r0, ( (PTRS_PER_PTE - 1) << 2) |
| 203 | ld.aw r0, [r1, r0] ; get PTE and PTE ptr for fault addr |
Vineet Gupta | 0ef88a5 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 204 | #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT |
| 205 | and.f 0, r0, _PAGE_PRESENT |
| 206 | bz 1f |
Vineet Gupta | dc81df2 | 2013-06-17 14:33:15 +0530 | [diff] [blame] | 207 | ld r3, [num_pte_not_present] |
| 208 | add r3, r3, 1 |
| 209 | st r3, [num_pte_not_present] |
Vineet Gupta | 0ef88a5 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 210 | 1: |
| 211 | #endif |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 212 | |
| 213 | .endm |
| 214 | |
| 215 | ;----------------------------------------------------------------- |
| 216 | ; Convert Linux PTE entry into TLB entry |
| 217 | ; A one-word PTE entry is programmed as two-word TLB Entry [PD0:PD1] in mmu |
| 218 | ; IN: r0 = PTE, r1 = ptr to PTE |
| 219 | |
| 220 | .macro CONV_PTE_TO_TLB |
Vineet Gupta | 64b703e | 2013-06-17 18:12:13 +0530 | [diff] [blame] | 221 | and r3, r0, PTE_BITS_RWX ; r w x |
| 222 | lsl r2, r3, 3 ; r w x 0 0 0 |
| 223 | and.f 0, r0, _PAGE_GLOBAL |
| 224 | or.z r2, r2, r3 ; r w x r w x |
| 225 | |
| 226 | and r3, r0, PTE_BITS_NON_RWX_IN_PD1 ; Extract PFN+cache bits from PTE |
| 227 | or r3, r3, r2 |
| 228 | |
| 229 | sr r3, [ARC_REG_TLBPD1] ; these go in PD1 |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 230 | |
| 231 | and r2, r0, PTE_BITS_IN_PD0 ; Extract other PTE flags: (V)alid, (G)lb |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 232 | |
| 233 | lr r3,[ARC_REG_TLBPD0] ; MMU prepares PD0 with vaddr and asid |
| 234 | |
| 235 | or r3, r3, r2 ; S | vaddr | {sasid|asid} |
| 236 | sr r3,[ARC_REG_TLBPD0] ; rewrite PD0 |
| 237 | .endm |
| 238 | |
| 239 | ;----------------------------------------------------------------- |
| 240 | ; Commit the TLB entry into MMU |
| 241 | |
| 242 | .macro COMMIT_ENTRY_TO_MMU |
| 243 | |
| 244 | /* Get free TLB slot: Set = computed from vaddr, way = random */ |
| 245 | sr TLBGetIndex, [ARC_REG_TLBCOMMAND] |
| 246 | |
| 247 | /* Commit the Write */ |
| 248 | #if (CONFIG_ARC_MMU_VER >= 2) /* introduced in v2 */ |
| 249 | sr TLBWriteNI, [ARC_REG_TLBCOMMAND] |
| 250 | #else |
| 251 | sr TLBWrite, [ARC_REG_TLBCOMMAND] |
| 252 | #endif |
| 253 | .endm |
| 254 | |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 255 | |
Vineet Gupta | 8b5850f | 2013-01-18 15:12:25 +0530 | [diff] [blame] | 256 | ARCFP_CODE ;Fast Path Code, candidate for ICCM |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 257 | |
| 258 | ;----------------------------------------------------------------------------- |
| 259 | ; I-TLB Miss Exception Handler |
| 260 | ;----------------------------------------------------------------------------- |
| 261 | |
| 262 | ARC_ENTRY EV_TLBMissI |
| 263 | |
| 264 | TLBMISS_FREEUP_REGS |
| 265 | |
Vineet Gupta | 0ef88a5 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 266 | #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT |
| 267 | ld r0, [@numitlb] |
| 268 | add r0, r0, 1 |
| 269 | st r0, [@numitlb] |
| 270 | #endif |
| 271 | |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 272 | ;---------------------------------------------------------------- |
Vineet Gupta | dc81df2 | 2013-06-17 14:33:15 +0530 | [diff] [blame] | 273 | ; Get the PTE corresponding to V-addr accessed, r2 is setup with EFA |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 274 | LOAD_FAULT_PTE |
| 275 | |
| 276 | ;---------------------------------------------------------------- |
| 277 | ; VERIFY_PTE: Check if PTE permissions approp for executing code |
| 278 | cmp_s r2, VMALLOC_START |
Vineet Gupta | 64b703e | 2013-06-17 18:12:13 +0530 | [diff] [blame] | 279 | mov_s r2, (_PAGE_PRESENT | _PAGE_EXECUTE) |
| 280 | or.hs r2, r2, _PAGE_GLOBAL |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 281 | |
| 282 | and r3, r0, r2 ; Mask out NON Flag bits from PTE |
| 283 | xor.f r3, r3, r2 ; check ( ( pte & flags_test ) == flags_test ) |
| 284 | bnz do_slow_path_pf |
| 285 | |
| 286 | ; Let Linux VM know that the page was accessed |
Vineet Gupta | c3e757a | 2013-06-17 11:35:15 +0530 | [diff] [blame] | 287 | or r0, r0, _PAGE_ACCESSED ; set Accessed Bit |
| 288 | st_s r0, [r1] ; Write back PTE |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 289 | |
| 290 | CONV_PTE_TO_TLB |
| 291 | COMMIT_ENTRY_TO_MMU |
| 292 | TLBMISS_RESTORE_REGS |
| 293 | rtie |
| 294 | |
| 295 | ARC_EXIT EV_TLBMissI |
| 296 | |
| 297 | ;----------------------------------------------------------------------------- |
| 298 | ; D-TLB Miss Exception Handler |
| 299 | ;----------------------------------------------------------------------------- |
| 300 | |
| 301 | ARC_ENTRY EV_TLBMissD |
| 302 | |
| 303 | TLBMISS_FREEUP_REGS |
| 304 | |
Vineet Gupta | 0ef88a5 | 2013-01-18 15:12:23 +0530 | [diff] [blame] | 305 | #ifdef CONFIG_ARC_DBG_TLB_MISS_COUNT |
| 306 | ld r0, [@numdtlb] |
| 307 | add r0, r0, 1 |
| 308 | st r0, [@numdtlb] |
| 309 | #endif |
| 310 | |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 311 | ;---------------------------------------------------------------- |
| 312 | ; Get the PTE corresponding to V-addr accessed |
Vineet Gupta | dc81df2 | 2013-06-17 14:33:15 +0530 | [diff] [blame] | 313 | ; If PTE exists, it will setup, r0 = PTE, r1 = Ptr to PTE, r2 = EFA |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 314 | LOAD_FAULT_PTE |
| 315 | |
| 316 | ;---------------------------------------------------------------- |
| 317 | ; VERIFY_PTE: Chk if PTE permissions approp for data access (R/W/R+W) |
| 318 | |
Vineet Gupta | 64b703e | 2013-06-17 18:12:13 +0530 | [diff] [blame] | 319 | cmp_s r2, VMALLOC_START |
| 320 | mov_s r2, _PAGE_PRESENT ; common bit for K/U PTE |
| 321 | or.hs r2, r2, _PAGE_GLOBAL ; kernel PTE only |
| 322 | |
| 323 | ; Linux PTE [RWX] bits are semantically overloaded: |
| 324 | ; -If PAGE_GLOBAL set, they refer to kernel-only flags (vmalloc) |
| 325 | ; -Otherwise they are user-mode permissions, and those are exactly |
| 326 | ; same for kernel mode as well (e.g. copy_(to|from)_user) |
| 327 | |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 328 | lr r3, [ecr] |
| 329 | btst_s r3, ECR_C_BIT_DTLB_LD_MISS ; Read Access |
Vineet Gupta | 64b703e | 2013-06-17 18:12:13 +0530 | [diff] [blame] | 330 | or.nz r2, r2, _PAGE_READ ; chk for Read flag in PTE |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 331 | btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; Write Access |
Vineet Gupta | 64b703e | 2013-06-17 18:12:13 +0530 | [diff] [blame] | 332 | or.nz r2, r2, _PAGE_WRITE ; chk for Write flag in PTE |
| 333 | ; Above laddering takes care of XCHG access (both R and W) |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 334 | |
| 335 | ; By now, r2 setup with all the Flags we need to check in PTE |
| 336 | and r3, r0, r2 ; Mask out NON Flag bits from PTE |
| 337 | brne.d r3, r2, do_slow_path_pf ; is ((pte & flags_test) == flags_test) |
| 338 | |
| 339 | ;---------------------------------------------------------------- |
| 340 | ; UPDATE_PTE: Let Linux VM know that page was accessed/dirty |
| 341 | lr r3, [ecr] |
Vineet Gupta | c3e757a | 2013-06-17 11:35:15 +0530 | [diff] [blame] | 342 | or r0, r0, _PAGE_ACCESSED ; Accessed bit always |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 343 | btst_s r3, ECR_C_BIT_DTLB_ST_MISS ; See if it was a Write Access ? |
| 344 | or.nz r0, r0, _PAGE_MODIFIED ; if Write, set Dirty bit as well |
| 345 | st_s r0, [r1] ; Write back PTE |
| 346 | |
| 347 | CONV_PTE_TO_TLB |
| 348 | |
| 349 | #if (CONFIG_ARC_MMU_VER == 1) |
| 350 | ; MMU with 2 way set assoc J-TLB, needs some help in pathetic case of |
| 351 | ; memcpy where 3 parties contend for 2 ways, ensuing a livelock. |
| 352 | ; But only for old MMU or one with Metal Fix |
| 353 | TLB_WRITE_HEURISTICS |
| 354 | #endif |
| 355 | |
| 356 | COMMIT_ENTRY_TO_MMU |
| 357 | TLBMISS_RESTORE_REGS |
| 358 | rtie |
| 359 | |
| 360 | ;-------- Common routine to call Linux Page Fault Handler ----------- |
| 361 | do_slow_path_pf: |
| 362 | |
| 363 | ; Restore the 4-scratch regs saved by fast path miss handler |
| 364 | TLBMISS_RESTORE_REGS |
| 365 | |
| 366 | ; Slow path TLB Miss handled as a regular ARC Exception |
| 367 | ; (stack switching / save the complete reg-file). |
Vineet Gupta | 37f3ac4 | 2013-07-09 15:07:13 +0530 | [diff] [blame] | 368 | EXCEPTION_PROLOGUE |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 369 | |
| 370 | ; ------- setup args for Linux Page fault Hanlder --------- |
| 371 | mov_s r0, sp |
Vineet Gupta | 3e1ae441 | 2013-06-12 13:49:02 +0530 | [diff] [blame] | 372 | lr r1, [efa] |
Vineet Gupta | cc562d2 | 2013-01-18 15:12:19 +0530 | [diff] [blame] | 373 | |
| 374 | ; We don't want exceptions to be disabled while the fault is handled. |
| 375 | ; Now that we have saved the context we return from exception hence |
| 376 | ; exceptions get re-enable |
| 377 | |
| 378 | FAKE_RET_FROM_EXCPN r9 |
| 379 | |
| 380 | bl do_page_fault |
| 381 | b ret_from_exception |
| 382 | |
| 383 | ARC_EXIT EV_TLBMissD |
| 384 | |
| 385 | ARC_ENTRY EV_TLBMissB ; Bogus entry to measure sz of DTLBMiss hdlr |