/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
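
// Each GP register slot is 8 bytes wide, so CPU_XREG_OFFSET(n) resolves
// to the offset of regs[n] within the vcpu's CPU context, assuming the
// layout described by the asm-offsets constants above. For example:
// CPU_XREG_OFFSET(19) == CPU_GP_REGS + CPU_USER_PT_REGS + 8*19.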

	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm
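
// Note: within save_common_regs, ELR_EL2/SPSR_EL2 still hold the
// interrupted EL1 PC/pstate (what the next ERET will return to), which
// is why they are captured into the context together with SP_EL0.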

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20		// EL1 PC
	msr	spsr_el2, x21		// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm
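
// CPU_XREG_OFFSET(31) addresses the slot just past x30: in struct
// user_pt_regs the sp, pc and pstate fields directly follow regs[0..30]
// (see asm/ptrace.h), so restore_common_regs can fetch SP_EL0, ELR_EL2
// and SPSR_EL2 from that base with a single ldp/ldr pair.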

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm
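
// fpsimd_save/fpsimd_restore come from asm/fpsimdmacros.h and transfer
// the full FP/SIMD state (v0-v31 plus fpsr/fpcr) to/from the context,
// taking the base address as the first argument and the number of a
// scratch register as the second.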

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm
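
// The push/pop pairs above mirror the trap path: el1_sync/el1_irq push
// the guest's x0-x3 before anything is clobbered, save_guest_regs pops
// them into the context, and restore_guest_regs pushes them back so the
// final pops can happen after every other register has been restored.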

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
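// For reference, the sysreg context is indexed in the order implied by
// the mrs/msr sequences below: MPIDR_EL1, CSSELR_EL1, SCTLR_EL1,
// ACTLR_EL1, CPACR_EL1, TTBR0_EL1, TTBR1_EL1, TCR_EL1, ESR_EL1,
// AFSR0_EL1, AFSR1_EL1, FAR_EL1, MAIR_EL1, VBAR_EL1, CONTEXTIDR_EL1,
// TPIDR_EL0, TPIDRRO_EL0, TPIDR_EL1, AMAIR_EL1, CNTKCTL_EL1.
// kvm_asm.h remains the authoritative definition of these indices.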
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
.endm

.macro activate_traps
	ldr	x2, [x0, #VCPU_IRQ_LINES]
	ldr	x1, [x0, #VCPU_HCR_EL2]
	orr	x2, x2, x1
	msr	hcr_el2, x2

	ldr	x2, =(CPTR_EL2_TTA)
	msr	cptr_el2, x2

	ldr	x2, =(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	msr	mdcr_el2, x2
.endm
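
// In summary: HCR_EL2 combines the vcpu's base configuration with any
// pending virtual interrupt lines, CPTR_EL2_TTA traps trace register
// accesses, HSTR_EL2 bit 15 traps CP15 c15 accesses from a 32bit guest,
// and MDCR_EL2 preserves HPMN while trapping PMU accesses (TPM/TPMCR).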

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	cptr_el2, xzr
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm
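
// VTTBR_EL2 carries both the VMID and the stage-2 page table base read
// from the kvm structure; clearing it in deactivate_vm leaves the
// hardware on VMID 0, which KVM reserves for the host.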

__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
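/*
 * The host typically reaches this entry point via kvm_call_hyp(), along
 * the lines of:
 *
 *	exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * The resulting HVC lands in el1_sync below, which converts x0 to a HYP
 * VA and shuffles x1-x3 down so that the vcpu pointer arrives in x0.
 */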
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl __save_fpsimd
	bl __save_sysregs

	activate_traps
	activate_vm

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl __save_fpsimd
	bl __save_sysregs

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	tlbi	ipas2e1is, x1
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)
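
// Reached from the host via kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm,
// ipa); the dsb/isb sequence above ensures the invalidation has
// completed before VTTBR_EL2 is cleared.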

ENTRY(__kvm_flush_vm_context)
	tlbi	alle1is
	ic	ialluis
	dsb	sy
	ret
ENDPROC(__kvm_flush_vm_context)
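
// tlbi alle1is + ic ialluis invalidate all stage-1/stage-2 TLB entries
// for all VMIDs and the entire instruction cache, Inner Shareable; the
// host uses this when the VMID generation rolls over.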

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already in host context.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

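	// Translate __hyp_panic_str from its HYP mapping to a kernel VA:
	// subtract HYP_PAGE_OFFSET and add PAGE_OFFSET (both read from the
	// literals at 2: below) so that panic() can dereference the string.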
1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2

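/*
 * Calling convention, as used throughout the host side of KVM: x0 holds
 * the HYP function to call (a kernel VA that el1_sync converts to a HYP
 * VA), x1-x3 carry up to three arguments, and the result comes back in
 * x0. The C prototype is u64 kvm_call_hyp(void *hypfn, ...).
 */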
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:				// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
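	// The cmp/ccmp pair below folds two tests into one branch: if the
	// EC is not DABT, ccmp compares it against IABT; if it is DABT,
	// ccmp forces the flags to "equal" (#4 sets Z). b.ne therefore
	// only fires when the EC is neither kind of abort.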
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs	x2, far_el2
	at	s1e1r, x2
	isb
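
	// PAR_EL1 bit 0 is set if the translation failed. On success, the
	// ubfx/lsl below repackage PA[47:12] from PAR_EL1 into the
	// HPFAR_EL2 format, which keeps IPA[47:12] in bits [39:4].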

	/* Read result */
	mrs	x3, par_el1
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	x1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2h_error_invalid	// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error_invalid	// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.popsection