Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
| 3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> |
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License, version 2, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software |
| 16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
| 17 | */ |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 18 | |
| 19 | #include <linux/linkage.h> |
| 20 | #include <linux/const.h> |
| 21 | #include <asm/unified.h> |
| 22 | #include <asm/page.h> |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 23 | #include <asm/ptrace.h> |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 24 | #include <asm/asm-offsets.h> |
| 25 | #include <asm/kvm_asm.h> |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 26 | #include <asm/kvm_arm.h> |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 27 | #include <asm/vfpmacros.h> |
| 28 | #include "interrupts_head.S" |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 29 | |
| 30 | .text |
| 31 | |
| 32 | __kvm_hyp_code_start: |
| 33 | .globl __kvm_hyp_code_start |
| 34 | |
| 35 | /******************************************************************** |
| 36 | * Flush per-VMID TLBs |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 37 | * |
Marc Zyngier | 4876276 | 2013-01-28 15:27:00 +0000 | [diff] [blame] | 38 | * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa); |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 39 | * |
| 40 | * We rely on the hardware to broadcast the TLB invalidation to all CPUs |
| 41 | * inside the inner-shareable domain (which is the case for all v7 |
| 42 | * implementations). If we come across a non-IS SMP implementation, we'll |
| 43 | * have to use an IPI based mechanism. Until then, we stick to the simple |
| 44 | * hardware assisted version. |
Marc Zyngier | 4876276 | 2013-01-28 15:27:00 +0000 | [diff] [blame] | 45 | * |
| 46 | * As v7 does not support flushing per IPA, just nuke the whole TLB |
| 47 | * instead, ignoring the ipa value. |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 48 | */ |
ENTRY(__kvm_tlb_flush_vmid_ipa)
	@ r0 = struct kvm *, r1 = ipa (unused: v7 has no per-IPA flush, so we
	@ nuke the whole TLB for this VMID instead -- see header comment above)
	push	{r2, r3}

	dsb	ishst			@ ensure page-table updates are visible
					@ before the TLB invalidation
	add	r0, r0, #KVM_VTTBR
	ldrd	r2, r3, [r0]		@ load this VM's VTTBR (64-bit)
	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR (selects the VMID to flush)
	isb
	mcr	p15, 0, r0, c8, c3, 0	@ TLBIALLIS (rt ignored)
	dsb	ish			@ complete the invalidation across the
					@ inner-shareable domain
	isb
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2	@ Back to VMID #0
	isb	@ Not necessary if followed by eret

	pop	{r2, r3}
	bx	lr
ENDPROC(__kvm_tlb_flush_vmid_ipa)
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 68 | |
| 69 | /******************************************************************** |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 70 | * Flush TLBs and instruction caches of all CPUs inside the inner-shareable |
| 71 | * domain, for all VMIDs |
| 72 | * |
| 73 | * void __kvm_flush_vm_context(void); |
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 74 | */ |
ENTRY(__kvm_flush_vm_context)
	@ Flush guest TLB entries and I-caches for *all* VMIDs, inner-shareable.
	mov	r0, #0			@ rn parameter for c15 flushes is SBZ

	/* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
	mcr	p15, 4, r0, c8, c3, 4
	/* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
	mcr	p15, 0, r0, c7, c1, 0
	dsb	ish			@ wait for both operations to complete
	isb	@ Not necessary if followed by eret

	bx	lr
ENDPROC(__kvm_flush_vm_context)
| 87 | |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 88 | |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 89 | /******************************************************************** |
| 90 | * Hypervisor world-switch code |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 91 | * |
| 92 | * |
| 93 | * int __kvm_vcpu_run(struct kvm_vcpu *vcpu) |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 94 | */ |
| 95 | ENTRY(__kvm_vcpu_run) |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 96 | @ Save the vcpu pointer |
| 97 | mcr p15, 4, vcpu, c13, c0, 2 @ HTPIDR |
| 98 | |
| 99 | save_host_regs |
| 100 | |
Marc Zyngier | 1a89dd9 | 2013-01-21 19:36:12 -0500 | [diff] [blame] | 101 | restore_vgic_state |
Marc Zyngier | 53e7240 | 2013-01-23 13:21:58 -0500 | [diff] [blame] | 102 | restore_timer_state |
Marc Zyngier | 1a89dd9 | 2013-01-21 19:36:12 -0500 | [diff] [blame] | 103 | |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 104 | @ Store hardware CP15 state and load guest state |
| 105 | read_cp15_state store_to_vcpu = 0 |
| 106 | write_cp15_state read_from_vcpu = 1 |
| 107 | |
| 108 | @ If the host kernel has not been configured with VFPv3 support, |
| 109 | @ then it is safer if we deny guests from using it as well. |
| 110 | #ifdef CONFIG_VFPv3 |
| 111 | @ Set FPEXC_EN so the guest doesn't trap floating point instructions |
| 112 | VFPFMRX r2, FPEXC @ VMRS |
| 113 | push {r2} |
| 114 | orr r2, r2, #FPEXC_EN |
| 115 | VFPFMXR FPEXC, r2 @ VMSR |
| 116 | #endif |
| 117 | |
| 118 | @ Configure Hyp-role |
| 119 | configure_hyp_role vmentry |
| 120 | |
| 121 | @ Trap coprocessor CRx accesses |
| 122 | set_hstr vmentry |
| 123 | set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) |
| 124 | set_hdcr vmentry |
| 125 | |
| 126 | @ Write configured ID register into MIDR alias |
| 127 | ldr r1, [vcpu, #VCPU_MIDR] |
| 128 | mcr p15, 4, r1, c0, c0, 0 |
| 129 | |
| 130 | @ Write guest view of MPIDR into VMPIDR |
| 131 | ldr r1, [vcpu, #CP15_OFFSET(c0_MPIDR)] |
| 132 | mcr p15, 4, r1, c0, c0, 5 |
| 133 | |
| 134 | @ Set up guest memory translation |
| 135 | ldr r1, [vcpu, #VCPU_KVM] |
| 136 | add r1, r1, #KVM_VTTBR |
| 137 | ldrd r2, r3, [r1] |
| 138 | mcrr p15, 6, r2, r3, c2 @ Write VTTBR |
| 139 | |
| 140 | @ We're all done, just restore the GPRs and go to the guest |
| 141 | restore_guest_regs |
| 142 | clrex @ Clear exclusive monitor |
| 143 | eret |
| 144 | |
| 145 | __kvm_vcpu_return: |
| 146 | /* |
| 147 | * return convention: |
| 148 | * guest r0, r1, r2 saved on the stack |
| 149 | * r0: vcpu pointer |
| 150 | * r1: exception code |
| 151 | */ |
| 152 | save_guest_regs |
| 153 | |
| 154 | @ Set VMID == 0 |
| 155 | mov r2, #0 |
| 156 | mov r3, #0 |
| 157 | mcrr p15, 6, r2, r3, c2 @ Write VTTBR |
| 158 | |
| 159 | @ Don't trap coprocessor accesses for host kernel |
| 160 | set_hstr vmexit |
| 161 | set_hdcr vmexit |
| 162 | set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)) |
| 163 | |
| 164 | #ifdef CONFIG_VFPv3 |
| 165 | @ Save floating point registers we if let guest use them. |
| 166 | tst r2, #(HCPTR_TCP(10) | HCPTR_TCP(11)) |
| 167 | bne after_vfp_restore |
| 168 | |
| 169 | @ Switch VFP/NEON hardware state to the host's |
| 170 | add r7, vcpu, #VCPU_VFP_GUEST |
| 171 | store_vfp_state r7 |
| 172 | add r7, vcpu, #VCPU_VFP_HOST |
| 173 | ldr r7, [r7] |
| 174 | restore_vfp_state r7 |
| 175 | |
| 176 | after_vfp_restore: |
| 177 | @ Restore FPEXC_EN which we clobbered on entry |
| 178 | pop {r2} |
| 179 | VFPFMXR FPEXC, r2 |
| 180 | #endif |
| 181 | |
| 182 | @ Reset Hyp-role |
| 183 | configure_hyp_role vmexit |
| 184 | |
| 185 | @ Let host read hardware MIDR |
| 186 | mrc p15, 0, r2, c0, c0, 0 |
| 187 | mcr p15, 4, r2, c0, c0, 0 |
| 188 | |
| 189 | @ Back to hardware MPIDR |
| 190 | mrc p15, 0, r2, c0, c0, 5 |
| 191 | mcr p15, 4, r2, c0, c0, 5 |
| 192 | |
| 193 | @ Store guest CP15 state and restore host state |
| 194 | read_cp15_state store_to_vcpu = 1 |
| 195 | write_cp15_state read_from_vcpu = 0 |
| 196 | |
Marc Zyngier | 53e7240 | 2013-01-23 13:21:58 -0500 | [diff] [blame] | 197 | save_timer_state |
Marc Zyngier | 1a89dd9 | 2013-01-21 19:36:12 -0500 | [diff] [blame] | 198 | save_vgic_state |
| 199 | |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 200 | restore_host_regs |
| 201 | clrex @ Clear exclusive monitor |
| 202 | mov r0, r1 @ Return the return code |
| 203 | mov r1, #0 @ Clear upper bits in return value |
| 204 | bx lr @ return to IOCTL |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 205 | |
| 206 | /******************************************************************** |
| 207 | * Call function in Hyp mode |
| 208 | * |
| 209 | * |
| 210 | * u64 kvm_call_hyp(void *hypfn, ...); |
| 211 | * |
| 212 | * This is not really a variadic function in the classic C-way and care must |
| 213 | * be taken when calling this to ensure parameters are passed in registers |
| 214 | * only, since the stack will change between the caller and the callee. |
| 215 | * |
| 216 | * Call the function with the first argument containing a pointer to the |
| 217 | * function you wish to call in Hyp mode, and subsequent arguments will be |
| 218 | * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the |
| 219 | * function pointer can be passed). The function being called must be mapped |
| 220 | * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). Return values are |
| 221 | * passed in r0 and r1. |
| 222 | * |
Marc Zyngier | b20c9f2 | 2014-02-26 18:47:36 +0000 | [diff] [blame] | 223 | * A function pointer with a value of 0xffffffff has a special meaning, |
| 224 | * and is used to implement __hyp_get_vectors in the same way as in |
| 225 | * arch/arm/kernel/hyp_stub.S. |
| 226 | * |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 227 | * The calling convention follows the standard AAPCS: |
| 228 | * r0 - r3: caller save |
| 229 | * r12: caller save |
| 230 | * rest: callee save |
| 231 | */ |
| 232 | ENTRY(kvm_call_hyp) |
| 233 | hvc #0 |
| 234 | bx lr |
| 235 | |
| 236 | /******************************************************************** |
| 237 | * Hypervisor exception vector and handlers |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 238 | * |
| 239 | * |
| 240 | * The KVM/ARM Hypervisor ABI is defined as follows: |
| 241 | * |
| 242 | * Entry to Hyp mode from the host kernel will happen _only_ when an HVC |
| 243 | * instruction is issued since all traps are disabled when running the host |
| 244 | * kernel as per the Hyp-mode initialization at boot time. |
| 245 | * |
Jonghwan Choi | 0b5e3ba | 2013-02-19 15:19:32 +0900 | [diff] [blame] | 246 | * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 247 | * below) when the HVC instruction is called from SVC mode (i.e. a guest or the |
Jonghwan Choi | 0b5e3ba | 2013-02-19 15:19:32 +0900 | [diff] [blame] | 248 | * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 249 | * instructions are called from within Hyp-mode. |
| 250 | * |
| 251 | * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode): |
| 252 | * Switching to Hyp mode is done through a simple HVC #0 instruction. The |
| 253 | * exception vector code will check that the HVC comes from VMID==0 and if |
| 254 | * so will push the necessary state (SPSR, lr_usr) on the Hyp stack. |
| 255 | * - r0 contains a pointer to a HYP function |
| 256 | * - r1, r2, and r3 contain arguments to the above function. |
| 257 | * - The HYP function will be called with its arguments in r0, r1 and r2. |
| 258 | * On HYP function return, we return directly to SVC. |
| 259 | * |
| 260 | * Note that the above is used to execute code in Hyp-mode from a host-kernel |
| 261 | * point of view, and is a different concept from performing a world-switch and |
| 262 | * executing guest code SVC mode (with a VMID != 0). |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 263 | */ |
| 264 | |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 265 | /* Handle undef, svc, pabt, or dabt by crashing with a user notice */ |
| 266 | .macro bad_exception exception_code, panic_str |
| 267 | push {r0-r2} |
| 268 | mrrc p15, 6, r0, r1, c2 @ Read VTTBR |
| 269 | lsr r1, r1, #16 |
| 270 | ands r1, r1, #0xff |
| 271 | beq 99f |
| 272 | |
| 273 | load_vcpu @ Load VCPU pointer |
| 274 | .if \exception_code == ARM_EXCEPTION_DATA_ABORT |
| 275 | mrc p15, 4, r2, c5, c2, 0 @ HSR |
| 276 | mrc p15, 4, r1, c6, c0, 0 @ HDFAR |
| 277 | str r2, [vcpu, #VCPU_HSR] |
| 278 | str r1, [vcpu, #VCPU_HxFAR] |
| 279 | .endif |
| 280 | .if \exception_code == ARM_EXCEPTION_PREF_ABORT |
| 281 | mrc p15, 4, r2, c5, c2, 0 @ HSR |
| 282 | mrc p15, 4, r1, c6, c0, 2 @ HIFAR |
| 283 | str r2, [vcpu, #VCPU_HSR] |
| 284 | str r1, [vcpu, #VCPU_HxFAR] |
| 285 | .endif |
| 286 | mov r1, #\exception_code |
| 287 | b __kvm_vcpu_return |
| 288 | |
| 289 | @ We were in the host already. Let's craft a panic-ing return to SVC. |
| 290 | 99: mrs r2, cpsr |
| 291 | bic r2, r2, #MODE_MASK |
| 292 | orr r2, r2, #SVC_MODE |
| 293 | THUMB( orr r2, r2, #PSR_T_BIT ) |
| 294 | msr spsr_cxsf, r2 |
| 295 | mrs r1, ELR_hyp |
| 296 | ldr r2, =BSYM(panic) |
| 297 | msr ELR_hyp, r2 |
| 298 | ldr r0, =\panic_str |
Marc Zyngier | 22cfbb6 | 2013-06-21 13:08:48 +0100 | [diff] [blame] | 299 | clrex @ Clear exclusive monitor |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 300 | eret |
| 301 | .endm |
| 302 | |
| 303 | .text |
| 304 | |
	.align 5			@ HVBAR requires a 32-byte aligned base
__kvm_hyp_vector:
	.globl __kvm_hyp_vector

	@ Hyp-mode exception vector (one branch per architectural vector slot)
	W(b)	hyp_reset
	W(b)	hyp_undef
	W(b)	hyp_svc
	W(b)	hyp_pabt
	W(b)	hyp_dabt
	W(b)	hyp_hvc
	W(b)	hyp_irq
	W(b)	hyp_fiq
| 318 | |
| 319 | .align |
| 320 | hyp_reset: |
| 321 | b hyp_reset |
| 322 | |
| 323 | .align |
| 324 | hyp_undef: |
| 325 | bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str |
| 326 | |
| 327 | .align |
| 328 | hyp_svc: |
| 329 | bad_exception ARM_EXCEPTION_HVC, svc_die_str |
| 330 | |
| 331 | .align |
| 332 | hyp_pabt: |
| 333 | bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str |
| 334 | |
| 335 | .align |
| 336 | hyp_dabt: |
| 337 | bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str |
| 338 | |
| 339 | .align |
| 340 | hyp_hvc: |
| 341 | /* |
| 342 | * Getting here is either becuase of a trap from a guest or from calling |
| 343 | * HVC from the host kernel, which means "switch to Hyp mode". |
| 344 | */ |
| 345 | push {r0, r1, r2} |
| 346 | |
| 347 | @ Check syndrome register |
| 348 | mrc p15, 4, r1, c5, c2, 0 @ HSR |
| 349 | lsr r0, r1, #HSR_EC_SHIFT |
| 350 | #ifdef CONFIG_VFPv3 |
| 351 | cmp r0, #HSR_EC_CP_0_13 |
| 352 | beq switch_to_guest_vfp |
| 353 | #endif |
| 354 | cmp r0, #HSR_EC_HVC |
| 355 | bne guest_trap @ Not HVC instr. |
| 356 | |
| 357 | /* |
| 358 | * Let's check if the HVC came from VMID 0 and allow simple |
| 359 | * switch to Hyp mode |
| 360 | */ |
| 361 | mrrc p15, 6, r0, r2, c2 |
| 362 | lsr r2, r2, #16 |
| 363 | and r2, r2, #0xff |
| 364 | cmp r2, #0 |
| 365 | bne guest_trap @ Guest called HVC |
| 366 | |
| 367 | host_switch_to_hyp: |
| 368 | pop {r0, r1, r2} |
| 369 | |
Marc Zyngier | b20c9f2 | 2014-02-26 18:47:36 +0000 | [diff] [blame] | 370 | /* Check for __hyp_get_vectors */ |
| 371 | cmp r0, #-1 |
| 372 | mrceq p15, 4, r0, c12, c0, 0 @ get HVBAR |
| 373 | beq 1f |
| 374 | |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 375 | push {lr} |
| 376 | mrs lr, SPSR |
| 377 | push {lr} |
| 378 | |
| 379 | mov lr, r0 |
| 380 | mov r0, r1 |
| 381 | mov r1, r2 |
| 382 | mov r2, r3 |
| 383 | |
| 384 | THUMB( orr lr, #1) |
| 385 | blx lr @ Call the HYP function |
| 386 | |
| 387 | pop {lr} |
| 388 | msr SPSR_csxf, lr |
| 389 | pop {lr} |
Marc Zyngier | b20c9f2 | 2014-02-26 18:47:36 +0000 | [diff] [blame] | 390 | 1: eret |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 391 | |
| 392 | guest_trap: |
| 393 | load_vcpu @ Load VCPU pointer to r0 |
| 394 | str r1, [vcpu, #VCPU_HSR] |
| 395 | |
| 396 | @ Check if we need the fault information |
| 397 | lsr r1, r1, #HSR_EC_SHIFT |
| 398 | cmp r1, #HSR_EC_IABT |
| 399 | mrceq p15, 4, r2, c6, c0, 2 @ HIFAR |
| 400 | beq 2f |
| 401 | cmp r1, #HSR_EC_DABT |
| 402 | bne 1f |
| 403 | mrc p15, 4, r2, c6, c0, 0 @ HDFAR |
| 404 | |
| 405 | 2: str r2, [vcpu, #VCPU_HxFAR] |
| 406 | |
| 407 | /* |
| 408 | * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode: |
| 409 | * |
| 410 | * Abort on the stage 2 translation for a memory access from a |
| 411 | * Non-secure PL1 or PL0 mode: |
| 412 | * |
| 413 | * For any Access flag fault or Translation fault, and also for any |
| 414 | * Permission fault on the stage 2 translation of a memory access |
| 415 | * made as part of a translation table walk for a stage 1 translation, |
| 416 | * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR |
| 417 | * is UNKNOWN. |
| 418 | */ |
| 419 | |
| 420 | /* Check for permission fault, and S1PTW */ |
| 421 | mrc p15, 4, r1, c5, c2, 0 @ HSR |
| 422 | and r0, r1, #HSR_FSC_TYPE |
| 423 | cmp r0, #FSC_PERM |
| 424 | tsteq r1, #(1 << 7) @ S1PTW |
| 425 | mrcne p15, 4, r2, c6, c0, 4 @ HPFAR |
| 426 | bne 3f |
| 427 | |
Marc Zyngier | 6a077e4 | 2013-06-21 13:08:46 +0100 | [diff] [blame] | 428 | /* Preserve PAR */ |
| 429 | mrrc p15, 0, r0, r1, c7 @ PAR |
| 430 | push {r0, r1} |
| 431 | |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 432 | /* Resolve IPA using the xFAR */ |
| 433 | mcr p15, 0, r2, c7, c8, 0 @ ATS1CPR |
| 434 | isb |
| 435 | mrrc p15, 0, r0, r1, c7 @ PAR |
| 436 | tst r0, #1 |
| 437 | bne 4f @ Failed translation |
| 438 | ubfx r2, r0, #12, #20 |
| 439 | lsl r2, r2, #4 |
| 440 | orr r2, r2, r1, lsl #24 |
| 441 | |
Marc Zyngier | 6a077e4 | 2013-06-21 13:08:46 +0100 | [diff] [blame] | 442 | /* Restore PAR */ |
| 443 | pop {r0, r1} |
| 444 | mcrr p15, 0, r0, r1, c7 @ PAR |
| 445 | |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 446 | 3: load_vcpu @ Load VCPU pointer to r0 |
| 447 | str r2, [r0, #VCPU_HPFAR] |
| 448 | |
| 449 | 1: mov r1, #ARM_EXCEPTION_HVC |
| 450 | b __kvm_vcpu_return |
| 451 | |
Marc Zyngier | 6a077e4 | 2013-06-21 13:08:46 +0100 | [diff] [blame] | 452 | 4: pop {r0, r1} @ Failed translation, return to guest |
| 453 | mcrr p15, 0, r0, r1, c7 @ PAR |
Marc Zyngier | 22cfbb6 | 2013-06-21 13:08:48 +0100 | [diff] [blame] | 454 | clrex |
Marc Zyngier | 6a077e4 | 2013-06-21 13:08:46 +0100 | [diff] [blame] | 455 | pop {r0, r1, r2} |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 456 | eret |
| 457 | |
| 458 | /* |
| 459 | * If VFPv3 support is not available, then we will not switch the VFP |
| 460 | * registers; however cp10 and cp11 accesses will still trap and fallback |
| 461 | * to the regular coprocessor emulation code, which currently will |
| 462 | * inject an undefined exception to the guest. |
| 463 | */ |
| 464 | #ifdef CONFIG_VFPv3 |
| 465 | switch_to_guest_vfp: |
| 466 | load_vcpu @ Load VCPU pointer to r0 |
| 467 | push {r3-r7} |
| 468 | |
| 469 | @ NEON/VFP used. Turn on VFP access. |
| 470 | set_hcptr vmexit, (HCPTR_TCP(10) | HCPTR_TCP(11)) |
| 471 | |
| 472 | @ Switch VFP/NEON hardware state to the guest's |
| 473 | add r7, r0, #VCPU_VFP_HOST |
| 474 | ldr r7, [r7] |
| 475 | store_vfp_state r7 |
| 476 | add r7, r0, #VCPU_VFP_GUEST |
| 477 | restore_vfp_state r7 |
| 478 | |
| 479 | pop {r3-r7} |
| 480 | pop {r0-r2} |
Marc Zyngier | 22cfbb6 | 2013-06-21 13:08:48 +0100 | [diff] [blame] | 481 | clrex |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 482 | eret |
| 483 | #endif |
| 484 | |
| 485 | .align |
| 486 | hyp_irq: |
| 487 | push {r0, r1, r2} |
| 488 | mov r1, #ARM_EXCEPTION_IRQ |
| 489 | load_vcpu @ Load VCPU pointer to r0 |
| 490 | b __kvm_vcpu_return |
| 491 | |
| 492 | .align |
| 493 | hyp_fiq: |
| 494 | b hyp_fiq |
| 495 | |
| 496 | .ltorg |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 497 | |
| 498 | __kvm_hyp_code_end: |
| 499 | .globl __kvm_hyp_code_end |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 500 | |
| 501 | .section ".rodata" |
| 502 | |
| 503 | und_die_str: |
Christoffer Dall | 1fe40f6 | 2013-08-14 12:33:48 -0700 | [diff] [blame] | 504 | .ascii "unexpected undefined exception in Hyp mode at: %#08x\n" |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 505 | pabt_die_str: |
Christoffer Dall | 1fe40f6 | 2013-08-14 12:33:48 -0700 | [diff] [blame] | 506 | .ascii "unexpected prefetch abort in Hyp mode at: %#08x\n" |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 507 | dabt_die_str: |
Christoffer Dall | 1fe40f6 | 2013-08-14 12:33:48 -0700 | [diff] [blame] | 508 | .ascii "unexpected data abort in Hyp mode at: %#08x\n" |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 509 | svc_die_str: |
Christoffer Dall | 1fe40f6 | 2013-08-14 12:33:48 -0700 | [diff] [blame] | 510 | .ascii "unexpected HVC/SVC trap in Hyp mode at: %#08x\n" |