/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

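/*
 * Byte offsets into struct kvm_cpu_context. The base constants
 * (CPU_GP_REGS, CPU_USER_PT_REGS, CPU_SPSR, CPU_SYSREGS) come from the
 * generated asm-offsets; each 64-bit register slot is 8 bytes wide,
 * hence the 8*x scaling.
 */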
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

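/*
 * Only the AAPCS64 callee-saved registers (x19-x28, x29/lr) plus the
 * EL0/EL1 return state are handled here: the caller-saved registers
 * are dead across the hypercall on the host side, and the guest's are
 * dealt with separately in save_guest_regs.
 */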
.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20		// EL1 PC
	msr	spsr_el2, x21		// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

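/*
 * fpsimd_save/fpsimd_restore (from asm/fpsimdmacros.h) transfer the
 * whole q0-q31 bank plus FPSR/FPCR, using register number 4 (x4/w4)
 * as scratch.
 */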
.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

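/*
 * Mirror image of save_guest_regs: the guest's x0-x3 are staged on the
 * stack first, so the final pops leave every guest GPR live just
 * before the eret.
 */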
.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

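	// The guest reads VMPIDR_EL2 whenever it accesses MPIDR_EL1,
	// so that is the copy we context-switch.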
	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1
	mrs	x24, par_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	str	x24, [x3, #160]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldr	x24, [x3, #160]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
	msr	par_el1, x24
.endm

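/*
 * HCR_EL2.RW is set for an AArch64 guest, which has no AArch32 state
 * to save or restore.
 */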
.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm

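/*
 * ID_PFR0_EL1[15:12] is the T32EE field; its only defined values are
 * 0b0000 (ThumbEE absent) and 0b0001 (present), so testing bit 12
 * alone is enough.
 */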
.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm

.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs	x4, spsr_abt
	mrs	x5, spsr_und
	mrs	x6, spsr_irq
	mrs	x7, spsr_fiq
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs	x4, dacr32_el2
	mrs	x5, ifsr32_el2
	mrs	x6, fpexc32_el2
	mrs	x7, dbgvcr32_el2
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	mrs	x4, teecr32_el1
	mrs	x5, teehbr32_el1
	stp	x4, x5, [x3]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	spsr_abt, x4
	msr	spsr_und, x5
	msr	spsr_irq, x6
	msr	spsr_fiq, x7

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	dacr32_el2, x4
	msr	ifsr32_el2, x5
	msr	fpexc32_el2, x6
	msr	dbgvcr32_el2, x7

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	ldp	x4, x5, [x3]
	msr	teecr32_el1, x4
	msr	teehbr32_el1, x5
1:
.endm

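/*
 * Set up the traps the guest must take to EL2: CPTR_EL2_TTA traps
 * trace register accesses, HSTR_EL2 bit 15 traps AArch32 CP15 c15
 * accesses, and the MDCR_EL2 TPM/TPMCR bits trap performance monitor
 * accesses while leaving the HPMN field untouched.
 */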
.macro activate_traps
	ldr	x2, [x0, #VCPU_IRQ_LINES]
	ldr	x1, [x0, #VCPU_HCR_EL2]
	orr	x2, x2, x1
	msr	hcr_el2, x2

	ldr	x2, =(CPTR_EL2_TTA)
	msr	cptr_el2, x2

	ldr	x2, =(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	msr	mdcr_el2, x2
.endm

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	cptr_el2, xzr
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm

/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1: it holds the exception code on the exit path!
 */
.macro save_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* Save all interesting registers */
	ldr	w4, [x2, #GICH_HCR]
	ldr	w5, [x2, #GICH_VMCR]
	ldr	w6, [x2, #GICH_MISR]
	ldr	w7, [x2, #GICH_EISR0]
	ldr	w8, [x2, #GICH_EISR1]
	ldr	w9, [x2, #GICH_ELRSR0]
	ldr	w10, [x2, #GICH_ELRSR1]
	ldr	w11, [x2, #GICH_APR]

	str	w4, [x3, #VGIC_CPU_HCR]
	str	w5, [x3, #VGIC_CPU_VMCR]
	str	w6, [x3, #VGIC_CPU_MISR]
	str	w7, [x3, #VGIC_CPU_EISR]
	str	w8, [x3, #(VGIC_CPU_EISR + 4)]
	str	w9, [x3, #VGIC_CPU_ELRSR]
	str	w10, [x3, #(VGIC_CPU_ELRSR + 4)]
	str	w11, [x3, #VGIC_CPU_APR]

	/* Clear GICH_HCR */
	str	wzr, [x2, #GICH_HCR]

	/* Save list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
1:	ldr	w5, [x2], #4
	str	w5, [x3], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
.endm

/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
.macro restore_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* We only restore a minimal set of registers */
	ldr	w4, [x3, #VGIC_CPU_HCR]
	ldr	w5, [x3, #VGIC_CPU_VMCR]
	ldr	w6, [x3, #VGIC_CPU_APR]

	str	w4, [x2, #GICH_HCR]
	str	w5, [x2, #GICH_VMCR]
	str	w6, [x2, #GICH_APR]

	/* Restore list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
1:	ldr	w5, [x3], #4
	str	w5, [x2], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
.endm

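/*
 * The virtual timer is stopped (ENABLE cleared) while the host runs,
 * and CNTHCTL_EL2 is rewritten so the host gets back the physical
 * timer/counter access that the guest is (partially) denied.
 */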
.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	and	x3, x3, #3
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable
	msr	cntv_ctl_el0, x3

	isb

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and	x2, x2, #3
	msr	cntv_ctl_el0, x2
1:
.endm

__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
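/*
 * Reached via kvm_call_hyp(__kvm_vcpu_run, vcpu): the host's hvc #0
 * lands in el1_sync below, which converts the first argument into a
 * HYP function pointer and branches here with the vcpu pointer in x0.
 */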
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl __save_fpsimd
	bl __save_sysregs

	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_guest_32bit_state
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl __save_fpsimd
	bl __save_sysregs
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
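// TLBI IPAS2E1IS operates on the current VMID, so the target VM's
// VTTBR must be installed (and synchronized with an isb) around the
// invalidation.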
ENTRY(__kvm_tlb_flush_vmid_ipa)
	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	tlbi	ipas2e1is, x1
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

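// Flush Stage-1 and Stage-2 TLB entries for all VMIDs, together with
// the icache; typically needed when the VMID generation rolls over
// and VMIDs get recycled.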
ENTRY(__kvm_flush_vm_context)
	tlbi	alle1is
	ic	ialluis
	dsb	sy
	ret
ENDPROC(__kvm_flush_vm_context)

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, we're already in the host context.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

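	// adr yields the string's HYP VA; rebase it from HYP_PAGE_OFFSET
	// to PAGE_OFFSET (literals at 2: below) so that panic(), entered
	// via eret at EL1, can dereference the format string.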
1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2

ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:				// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

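	// Stash lr, which the blr below is about to clobber; xzr is
	// just padding to keep the stack 16-byte aligned.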
	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
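	// The cmp/ccmp pair leaves Z set only when the EC is a data or
	// instruction abort, so the b.ne below skips everything else.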
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	x1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2h_error_invalid	// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error_invalid	// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.popsection