Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 1 | /* |
| 2 | * Copyright (C) 2012 - Virtual Open Systems and Columbia University |
| 3 | * Author: Christoffer Dall <c.dall@virtualopensystems.com> |
| 4 | * |
| 5 | * This program is free software; you can redistribute it and/or modify |
| 6 | * it under the terms of the GNU General Public License, version 2, as |
| 7 | * published by the Free Software Foundation. |
| 8 | * |
| 9 | * This program is distributed in the hope that it will be useful, |
| 10 | * but WITHOUT ANY WARRANTY; without even the implied warranty of |
| 11 | * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the |
| 12 | * GNU General Public License for more details. |
| 13 | * |
| 14 | * You should have received a copy of the GNU General Public License |
| 15 | * along with this program; if not, write to the Free Software |
| 16 | * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA. |
| 17 | */ |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 18 | |
| 19 | #include <linux/linkage.h> |
| 20 | #include <linux/const.h> |
| 21 | #include <asm/unified.h> |
| 22 | #include <asm/page.h> |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 23 | #include <asm/ptrace.h> |
Christoffer Dall | 749cf76c | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 24 | #include <asm/asm-offsets.h> |
| 25 | #include <asm/kvm_asm.h> |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 26 | #include <asm/kvm_arm.h> |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 27 | #include <asm/vfpmacros.h> |
| 28 | #include "interrupts_head.S" |
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 29 | |
	.text

@ Start-of-range marker for the Hyp-mode code. Exported so other code can
@ reference the extent of this section (paired with __kvm_hyp_code_end below);
@ presumably used when mapping this code into the Hyp address space — confirm
@ against the init code in arch/arm/kvm/.
__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start
| 34 | |
/********************************************************************
 * Flush per-VMID TLBs
 *
 * void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations). If we come across a non-IS SMP implementation, we'll
 * have to use an IPI based mechanism. Until then, we stick to the simple
 * hardware assisted version.
 *
 * As v7 does not support flushing per IPA, just nuke the whole TLB
 * instead, ignoring the ipa value.
 *
 * Runs in Hyp mode; r0 = struct kvm pointer, r1 = ipa (unused here).
 */
ENTRY(__kvm_tlb_flush_vmid_ipa)
	push	{r2, r3}

	dsb	ishst				@ Ensure prior stage-2 PTE writes are visible before invalidation
	add	r0, r0, #KVM_VTTBR
	ldrd	r2, r3, [r0]			@ Load the 64-bit VTTBR (contains the VMID) from struct kvm
	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR
	isb					@ Make the new VMID take effect before the TLB op
	mcr	p15, 0, r0, c8, c3, 0		@ TLBIALLIS (rt ignored)
	dsb	ish				@ Complete the invalidation across the inner-shareable domain
	isb
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2		@ Back to VMID #0
	isb					@ Not necessary if followed by eret

	pop	{r2, r3}
	bx	lr
ENDPROC(__kvm_tlb_flush_vmid_ipa)
Christoffer Dall | d5d8184 | 2013-01-20 18:28:07 -0500 | [diff] [blame] | 68 | |
/**
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 *
 * Reuses __kvm_tlb_flush_vmid_ipa() for ARMv7, without passing address
 * parameter.  This is safe because the IPA argument is ignored there
 * anyway (v7 flushes the whole per-VMID TLB).
 */

ENTRY(__kvm_tlb_flush_vmid)
	b	__kvm_tlb_flush_vmid_ipa	@ Tail-call; r0 (kvm) passes straight through
ENDPROC(__kvm_tlb_flush_vmid)
| 79 | |
/********************************************************************
 * Flush TLBs and instruction caches of all CPUs inside the inner-shareable
 * domain, for all VMIDs
 *
 * void __kvm_flush_vm_context(void);
 *
 * Used when the VMID space rolls over and every guest mapping must go.
 */
ENTRY(__kvm_flush_vm_context)
	mov	r0, #0			@ rn parameter for c15 flushes is SBZ

	/* Invalidate NS Non-Hyp TLB Inner Shareable (TLBIALLNSNHIS) */
	mcr	p15, 4, r0, c8, c3, 4
	/* Invalidate instruction caches Inner Shareable (ICIALLUIS) */
	mcr	p15, 0, r0, c7, c1, 0
	dsb	ish			@ Wait for both invalidations to complete everywhere
	isb				@ Not necessary if followed by eret

	bx	lr
ENDPROC(__kvm_flush_vm_context)
| 98 | |
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 99 | |
/********************************************************************
 * Hypervisor world-switch code
 *
 * Saves the host context, installs the guest context (GIC, timer, CP15,
 * optionally VFP), and erets into the guest.  Guest exits come back in
 * at __kvm_vcpu_return below, which undoes all of this and returns the
 * exception code to the caller.
 *
 * int __kvm_vcpu_run(struct kvm_vcpu *vcpu)
 *
 * The 'vcpu' register alias and the save/restore macros used below come
 * from interrupts_head.S.
 */
ENTRY(__kvm_vcpu_run)
	@ Save the vcpu pointer so exception handlers can retrieve it (load_vcpu)
	mcr	p15, 4, vcpu, c13, c0, 2	@ HTPIDR

	save_host_regs

	restore_vgic_state
	restore_timer_state

	@ Store hardware CP15 state and load guest state
	read_cp15_state store_to_vcpu = 0
	write_cp15_state read_from_vcpu = 1

	@ If the host kernel has not been configured with VFPv3 support,
	@ then it is safer if we deny guests from using it as well.
#ifdef CONFIG_VFPv3
	@ Set FPEXC_EN so the guest doesn't trap floating point instructions
	VFPFMRX r2, FPEXC		@ VMRS
	push	{r2}			@ Saved FPEXC is restored at after_vfp_restore
	orr	r2, r2, #FPEXC_EN
	VFPFMXR FPEXC, r2		@ VMSR
#endif

	@ Configure Hyp-role
	configure_hyp_role vmentry

	@ Trap coprocessor CRx accesses
	set_hstr vmentry
	set_hcptr vmentry, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11))
	set_hdcr vmentry

	@ Write configured ID register into MIDR alias
	ldr	r1, [vcpu, #VCPU_MIDR]
	mcr	p15, 4, r1, c0, c0, 0

	@ Write guest view of MPIDR into VMPIDR
	ldr	r1, [vcpu, #CP15_OFFSET(c0_MPIDR)]
	mcr	p15, 4, r1, c0, c0, 5

	@ Set up guest memory translation
	ldr	r1, [vcpu, #VCPU_KVM]	@ r1 points to struct kvm
	add	r1, r1, #KVM_VTTBR
	ldrd	r2, r3, [r1]
	mcrr	p15, 6, rr_lo_hi(r2, r3), c2	@ Write VTTBR

	@ We're all done, just restore the GPRs and go to the guest
	restore_guest_regs
	clrex				@ Clear exclusive monitor
	eret

__kvm_vcpu_return:
	/*
	 * return convention:
	 * guest r0, r1, r2 saved on the stack
	 * r0: vcpu pointer
	 * r1: exception code
	 */
	save_guest_regs

	@ Set VMID == 0
	mov	r2, #0
	mov	r3, #0
	mcrr	p15, 6, r2, r3, c2	@ Write VTTBR

	@ Don't trap coprocessor accesses for host kernel
	set_hstr vmexit
	set_hdcr vmexit
	set_hcptr vmexit, (HCPTR_TTA | HCPTR_TCP(10) | HCPTR_TCP(11)), after_vfp_restore

#ifdef CONFIG_VFPv3
	@ Switch VFP/NEON hardware state to the host's
	add	r7, vcpu, #VCPU_VFP_GUEST
	store_vfp_state r7
	add	r7, vcpu, #VCPU_VFP_HOST
	ldr	r7, [r7]
	restore_vfp_state r7

after_vfp_restore:
	@ Restore FPEXC_EN which we clobbered on entry
	pop	{r2}
	VFPFMXR FPEXC, r2
#else
after_vfp_restore:
#endif

	@ Reset Hyp-role
	configure_hyp_role vmexit

	@ Let host read hardware MIDR
	mrc	p15, 0, r2, c0, c0, 0
	mcr	p15, 4, r2, c0, c0, 0

	@ Back to hardware MPIDR
	mrc	p15, 0, r2, c0, c0, 5
	mcr	p15, 4, r2, c0, c0, 5

	@ Store guest CP15 state and restore host state
	read_cp15_state store_to_vcpu = 1
	write_cp15_state read_from_vcpu = 0

	save_timer_state
	save_vgic_state

	restore_host_regs
	clrex				@ Clear exclusive monitor
	@ Place the exception code in the low word of the 64-bit (r0,r1)
	@ return value; which register is "low" depends on endianness.
#ifndef CONFIG_CPU_ENDIAN_BE8
	mov	r0, r1			@ Return the return code
	mov	r1, #0			@ Clear upper bits in return value
#else
	@ r1 already has return code
	mov	r0, #0			@ Clear upper bits in return value
#endif /* CONFIG_CPU_ENDIAN_BE8 */
	bx	lr			@ return to IOCTL
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 219 | |
/********************************************************************
 * Call function in Hyp mode
 *
 *
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as r0, r1, and r2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  Return values are
 * passed in r0 and r1.
 *
 * A function pointer with a value of 0xffffffff has a special meaning,
 * and is used to implement __hyp_get_vectors in the same way as in
 * arch/arm/kernel/hyp_stub.S.
 *
 * The calling convention follows the standard AAPCS:
 *   r0 - r3: caller save
 *   r12:     caller save
 *   rest:    callee save
 */
ENTRY(kvm_call_hyp)
	hvc	#0		@ Trap into Hyp mode; dispatched by hyp_hvc below
	bx	lr
| 249 | |
/********************************************************************
 * Hypervisor exception vector and handlers
 *
 *
 * The KVM/ARM Hypervisor ABI is defined as follows:
 *
 * Entry to Hyp mode from the host kernel will happen _only_ when an HVC
 * instruction is issued since all traps are disabled when running the host
 * kernel as per the Hyp-mode initialization at boot time.
 *
 * HVC instructions cause a trap to the vector page + offset 0x14 (see hyp_hvc
 * below) when the HVC instruction is called from SVC mode (i.e. a guest or the
 * host kernel) and they cause a trap to the vector page + offset 0x8 when HVC
 * instructions are called from within Hyp-mode.
 *
 * Hyp-ABI: Calling HYP-mode functions from host (in SVC mode):
 *    Switching to Hyp mode is done through a simple HVC #0 instruction. The
 *    exception vector code will check that the HVC comes from VMID==0 and if
 *    so will push the necessary state (SPSR, lr_usr) on the Hyp stack.
 *    - r0 contains a pointer to a HYP function
 *    - r1, r2, and r3 contain arguments to the above function.
 *    - The HYP function will be called with its arguments in r0, r1 and r2.
 *    On HYP function return, we return directly to SVC.
 *
 * Note that the above is used to execute code in Hyp-mode from a host-kernel
 * point of view, and is a different concept from performing a world-switch and
 * executing guest code SVC mode (with a VMID != 0).
 */

/*
 * Handle undef, svc, pabt, or dabt by crashing with a user notice.
 *
 * If the exception was taken while a guest was running (VMID != 0), report
 * it to the run loop via __kvm_vcpu_return with \exception_code, saving
 * HSR and the fault address in the vcpu first where relevant.  If it was
 * taken while in the host (VMID == 0) the hypervisor itself is broken, so
 * craft an exception return into SVC-mode panic() with \panic_str as the
 * format argument and the faulting Hyp address as its %x argument.
 */
.macro bad_exception exception_code, panic_str
	push	{r0-r2}
	mrrc	p15, 6, r0, r1, c2	@ Read VTTBR
	lsr	r1, r1, #16
	ands	r1, r1, #0xff		@ Extract the VMID; zero means we came from the host
	beq	99f

	load_vcpu			@ Load VCPU pointer
	.if \exception_code == ARM_EXCEPTION_DATA_ABORT
	mrc	p15, 4, r2, c5, c2, 0	@ HSR
	mrc	p15, 4, r1, c6, c0, 0	@ HDFAR
	str	r2, [vcpu, #VCPU_HSR]
	str	r1, [vcpu, #VCPU_HxFAR]
	.endif
	.if \exception_code == ARM_EXCEPTION_PREF_ABORT
	mrc	p15, 4, r2, c5, c2, 0	@ HSR
	mrc	p15, 4, r1, c6, c0, 2	@ HIFAR
	str	r2, [vcpu, #VCPU_HSR]
	str	r1, [vcpu, #VCPU_HxFAR]
	.endif
	mov	r1, #\exception_code
	b	__kvm_vcpu_return

	@ We were in the host already. Let's craft a panic-ing return to SVC.
99:	mrs	r2, cpsr
	bic	r2, r2, #MODE_MASK
	orr	r2, r2, #SVC_MODE
THUMB(	orr	r2, r2, #PSR_T_BIT	)
	msr	spsr_cxsf, r2		@ Return into SVC mode...
	mrs	r1, ELR_hyp		@ ...with the faulting Hyp PC as panic()'s %x argument
	ldr	r2, =panic
	msr	ELR_hyp, r2		@ ...landing directly in panic()
	ldr	r0, =\panic_str
	clrex				@ Clear exclusive monitor
	eret
.endm
| 316 | |
	.text

	.align 5			@ HVBAR requires 32-byte alignment of the vector base
__kvm_hyp_vector:
	.globl __kvm_hyp_vector

	@ Hyp-mode exception vector: one branch per architectural vector slot,
	@ in the order fixed by the ARMv7 Virtualization Extensions.
	W(b)	hyp_reset
	W(b)	hyp_undef
	W(b)	hyp_svc
	W(b)	hyp_pabt
	W(b)	hyp_dabt
	W(b)	hyp_hvc
	W(b)	hyp_irq
	W(b)	hyp_fiq
| 332 | |
	.align
hyp_reset:
	b	hyp_reset		@ Reset cannot happen here; spin forever

	@ The following four exceptions are never expected in Hyp mode;
	@ each one reports via the bad_exception macro (guest exit or panic).
	.align
hyp_undef:
	bad_exception ARM_EXCEPTION_UNDEFINED, und_die_str

	.align
hyp_svc:
	bad_exception ARM_EXCEPTION_HVC, svc_die_str

	.align
hyp_pabt:
	bad_exception ARM_EXCEPTION_PREF_ABORT, pabt_die_str

	.align
hyp_dabt:
	bad_exception ARM_EXCEPTION_DATA_ABORT, dabt_die_str
| 353 | .align |
	.align
hyp_hvc:
	/*
	 * Getting here is either because of a trap from a guest or from calling
	 * HVC from the host kernel, which means "switch to Hyp mode".
	 */
	push	{r0, r1, r2}

	@ Check syndrome register
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	lsr	r0, r1, #HSR_EC_SHIFT	@ r0 = exception class
#ifdef CONFIG_VFPv3
	cmp	r0, #HSR_EC_CP_0_13	@ Guest touched VFP/NEON? Lazy-switch it in
	beq	switch_to_guest_vfp
#endif
	cmp	r0, #HSR_EC_HVC
	bne	guest_trap		@ Not HVC instr.

	/*
	 * Let's check if the HVC came from VMID 0 and allow simple
	 * switch to Hyp mode
	 */
	mrrc	p15, 6, r0, r2, c2	@ Read VTTBR to extract the current VMID
	lsr	r2, r2, #16
	and	r2, r2, #0xff
	cmp	r2, #0
	bne	guest_trap		@ Guest called HVC

host_switch_to_hyp:
	pop	{r0, r1, r2}

	/* Check for __hyp_get_vectors: fn pointer 0xffffffff means "return HVBAR" */
	cmp	r0, #-1
	mrceq	p15, 4, r0, c12, c0, 0	@ get HVBAR
	beq	1f

	@ Preserve the caller's SPSR and lr across the Hyp function call
	push	{lr}
	mrs	lr, SPSR
	push	{lr}

	@ Shift args down: the callee sees r1-r3 as its r0-r2 (see Hyp-ABI above)
	mov	lr, r0
	mov	r0, r1
	mov	r1, r2
	mov	r2, r3

THUMB(	orr	lr, #1)			@ Keep Thumb bit so blx stays in Thumb state
	blx	lr			@ Call the HYP function

	pop	{lr}
	msr	SPSR_csxf, lr
	pop	{lr}
1:	eret
Christoffer Dall | f7ed45b | 2013-01-20 18:47:42 -0500 | [diff] [blame] | 405 | |
/*
 * A guest trapped to Hyp mode (anything but a host HVC).  Record the
 * syndrome and fault addresses in the vcpu, resolve the faulting IPA when
 * the hardware didn't provide it in HPFAR, then exit to the host run loop
 * via __kvm_vcpu_return with ARM_EXCEPTION_HVC.
 * On entry: guest r0-r2 are on the stack, r1 holds the HSR.
 */
guest_trap:
	load_vcpu			@ Load VCPU pointer to r0
	str	r1, [vcpu, #VCPU_HSR]

	@ Check if we need the fault information
	lsr	r1, r1, #HSR_EC_SHIFT
	cmp	r1, #HSR_EC_IABT
	mrceq	p15, 4, r2, c6, c0, 2	@ HIFAR
	beq	2f
	cmp	r1, #HSR_EC_DABT
	bne	1f			@ Neither abort class: no fault address to record
	mrc	p15, 4, r2, c6, c0, 0	@ HDFAR

2:	str	r2, [vcpu, #VCPU_HxFAR]

	/*
	 * B3.13.5 Reporting exceptions taken to the Non-secure PL2 mode:
	 *
	 * Abort on the stage 2 translation for a memory access from a
	 * Non-secure PL1 or PL0 mode:
	 *
	 * For any Access flag fault or Translation fault, and also for any
	 * Permission fault on the stage 2 translation of a memory access
	 * made as part of a translation table walk for a stage 1 translation,
	 * the HPFAR holds the IPA that caused the fault. Otherwise, the HPFAR
	 * is UNKNOWN.
	 */

	/* Check for permission fault, and S1PTW */
	mrc	p15, 4, r1, c5, c2, 0	@ HSR
	and	r0, r1, #HSR_FSC_TYPE
	cmp	r0, #FSC_PERM
	tsteq	r1, #(1 << 7)		@ S1PTW
	mrcne	p15, 4, r2, c6, c0, 4	@ HPFAR
	bne	3f

	/* Preserve PAR, clobbered by the address translation below */
	mrrc	p15, 0, r0, r1, c7	@ PAR
	push	{r0, r1}

	/* Resolve IPA using the xFAR: stage-1-only translation, result in PAR */
	mcr	p15, 0, r2, c7, c8, 0	@ ATS1CPR
	isb
	mrrc	p15, 0, r0, r1, c7	@ PAR
	tst	r0, #1			@ PAR.F set => translation aborted
	bne	4f			@ Failed translation
	@ Build the HPFAR-format IPA: PA[39:12] >> 8, i.e. bits [31:4] hold IPA[39:12]
	ubfx	r2, r0, #12, #20
	lsl	r2, r2, #4
	orr	r2, r2, r1, lsl #24

	/* Restore PAR */
	pop	{r0, r1}
	mcrr	p15, 0, r0, r1, c7	@ PAR

3:	load_vcpu			@ Load VCPU pointer to r0
	str	r2, [r0, #VCPU_HPFAR]

1:	mov	r1, #ARM_EXCEPTION_HVC
	b	__kvm_vcpu_return

4:	pop	{r0, r1}		@ Failed translation, return to guest
	mcrr	p15, 0, r0, r1, c7	@ PAR
	clrex
	pop	{r0, r1, r2}
	eret
| 471 | |
/*
 * If VFPv3 support is not available, then we will not switch the VFP
 * registers; however cp10 and cp11 accesses will still trap and fallback
 * to the regular coprocessor emulation code, which currently will
 * inject an undefined exception to the guest.
 *
 * Lazy VFP switch: reached from hyp_hvc when the guest first touches
 * VFP/NEON (cp10/cp11 trap).  Swaps in the guest's VFP state, disables
 * further cp10/cp11 trapping, and resumes the guest at the trapped
 * instruction.  Guest r0-r2 are on the stack on entry.
 */
#ifdef CONFIG_VFPv3
switch_to_guest_vfp:
	load_vcpu			@ Load VCPU pointer to r0
	push	{r3-r7}			@ r3-r7 are scratched by the vfp_state macros

	@ NEON/VFP used.  Turn on VFP access.
	set_hcptr vmtrap, (HCPTR_TCP(10) | HCPTR_TCP(11))

	@ Switch VFP/NEON hardware state to the guest's
	add	r7, r0, #VCPU_VFP_HOST
	ldr	r7, [r7]
	store_vfp_state r7
	add	r7, r0, #VCPU_VFP_GUEST
	restore_vfp_state r7

	pop	{r3-r7}
	pop	{r0-r2}
	clrex
	eret				@ Re-executes the instruction that trapped
#endif
| 498 | |
	.align
hyp_irq:
	@ Physical IRQ while the guest was running: exit to the host,
	@ which will take the interrupt once back in SVC mode.
	push	{r0, r1, r2}
	mov	r1, #ARM_EXCEPTION_IRQ
	load_vcpu			@ Load VCPU pointer to r0
	b	__kvm_vcpu_return

	.align
hyp_fiq:
	b	hyp_fiq			@ FIQs are not expected in Hyp mode; spin forever

	.ltorg				@ Flush literal pool (panic/str addresses from bad_exception)
Christoffer Dall | 342cd0a | 2013-01-20 18:28:06 -0500 | [diff] [blame] | 511 | |
@ End-of-range marker for the Hyp-mode code (paired with __kvm_hyp_code_start).
__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.section ".rodata"

/*
 * Panic format strings for bad_exception.  Each is loaded into r0 and
 * passed to panic() as a printf-style format string (the faulting Hyp PC
 * is the %x argument), so each MUST be NUL-terminated: use .asciz, not
 * .ascii, or panic() reads past the string into adjacent rodata.
 */
und_die_str:
	.asciz	"unexpected undefined exception in Hyp mode at: %#08x\n"
pabt_die_str:
	.asciz	"unexpected prefetch abort in Hyp mode at: %#08x\n"
dabt_die_str:
	.asciz	"unexpected data abort in Hyp mode at: %#08x\n"
svc_die_str:
	.asciz	"unexpected HVC/SVC trap in Hyp mode at: %#08x\n"