/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program. If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
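
/*
 * The CPU_* constants above (CPU_GP_REGS, CPU_USER_PT_REGS, CPU_SPSR,
 * CPU_SYSREGS) are byte offsets generated by asm-offsets.c from the
 * kvm_cpu_context/kvm_regs layout, and every GP/SPSR/sysreg slot is
 * 8 bytes wide. As a worked example (the real values come from the
 * generated asm-offsets.h):
 *
 *	CPU_XREG_OFFSET(19) = CPU_GP_REGS + CPU_USER_PT_REGS + 8*19
 *
 * i.e. the offset of regs->regs[19] relative to the context base that
 * is held in x2 throughout the macros below.
 */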

	.text
	.pushsection .hyp.text, "ax"
	.align PAGE_SHIFT

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add x3, x2, #CPU_XREG_OFFSET(19)
	stp x19, x20, [x3]
	stp x21, x22, [x3, #16]
	stp x23, x24, [x3, #32]
	stp x25, x26, [x3, #48]
	stp x27, x28, [x3, #64]
	stp x29, lr, [x3, #80]

	mrs x19, sp_el0
	mrs x20, elr_el2		// EL1 PC
	mrs x21, spsr_el2		// EL1 pstate

	stp x19, x20, [x3, #96]
	str x21, [x3, #112]

	mrs x22, sp_el1
	mrs x23, elr_el1
	mrs x24, spsr_el1

	str x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr sp_el1, x22
	msr elr_el1, x23
	msr spsr_el1, x24

	add x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp x19, x20, [x3]
	ldr x21, [x3, #16]

	msr sp_el0, x19
	msr elr_el2, x20		// EL1 PC
	msr spsr_el2, x21		// EL1 pstate

	add x3, x2, #CPU_XREG_OFFSET(19)
	ldp x19, x20, [x3]
	ldp x21, x22, [x3, #16]
	ldp x23, x24, [x3, #32]
	ldp x25, x26, [x3, #48]
	ldp x27, x28, [x3, #64]
	ldp x29, lr, [x3, #80]
.endm
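
/*
 * Note on the layout assumed by the CPU_XREG_OFFSET() arithmetic above:
 * the GP register file is stored as a struct user_pt_regs, so slot 31
 * (one past x0-x30) is the saved sp (SP_EL0), followed by pc and pstate,
 * which is why a single ldp/ldr pair starting at CPU_XREG_OFFSET(31)
 * recovers sp_el0, elr_el2 and spsr_el2 in one go. A rough sketch of the
 * structures involved (see asm/kvm_host.h and uapi/asm/kvm.h for the
 * authoritative definitions):
 *
 *	struct user_pt_regs {
 *		__u64 regs[31];
 *		__u64 sp;
 *		__u64 pc;
 *		__u64 pstate;
 *	};
 *
 *	struct kvm_regs {
 *		struct user_pt_regs regs;	// sp == SP_EL0
 *		__u64 sp_el1;
 *		__u64 elr_el1;
 *		__u64 spsr[KVM_NR_SPSR];	// indexed by KVM_SPSR_EL1 etc.
 *		struct user_fpsimd_state fp_regs;
 *	};
 */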

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm
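
/*
 * fpsimd_save/fpsimd_restore come from asm/fpsimdmacros.h (included
 * above); they save or reload the full FP/SIMD register state (V0-V31
 * plus FPSR/FPCR) starting at the address in x3, and may clobber the
 * numbered temporary register passed as the second argument (x4 here).
 */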

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add x3, x2, #CPU_XREG_OFFSET(4)
	stp x4, x5, [x3]
	stp x6, x7, [x3, #16]
	stp x8, x9, [x3, #32]
	stp x10, x11, [x3, #48]
	stp x12, x13, [x3, #64]
	stp x14, x15, [x3, #80]
	stp x16, x17, [x3, #96]
	str x18, [x3, #112]

	pop x6, x7			// x2, x3
	pop x4, x5			// x0, x1

	add x3, x2, #CPU_XREG_OFFSET(0)
	stp x4, x5, [x3]
	stp x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add x3, x2, #CPU_XREG_OFFSET(0)
	ldp x4, x5, [x3]
	ldp x6, x7, [x3, #16]
	push x4, x5			// Push x0-x3 on the stack
	push x6, x7

	// x4-x18
	ldp x4, x5, [x3, #32]
	ldp x6, x7, [x3, #48]
	ldp x8, x9, [x3, #64]
	ldp x10, x11, [x3, #80]
	ldp x12, x13, [x3, #96]
	ldp x14, x15, [x3, #112]
	ldp x16, x17, [x3, #128]
	ldr x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop x2, x3
	pop x0, x1

	// Do not touch any register after this!
.endm
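
/*
 * Stack discipline assumed by the two macros above: the EL2 vectors
 * (el1_sync/el1_irq below) push the guest's x0-x3 in two pairs before
 * x0-x3 get reused for the vcpu pointer, the return code and scratch
 * values. save_guest_regs therefore pops them back to recover the real
 * guest values, and restore_guest_regs mirrors this by pushing the
 * to-be-restored x0-x3 first and popping them as the very last step
 * before the eret. The push/pop macros themselves are simple stp/ldp
 * pairs on the EL2 stack, provided by asm/assembler.h.
 */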

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs x4, vmpidr_el2
	mrs x5, csselr_el1
	mrs x6, sctlr_el1
	mrs x7, actlr_el1
	mrs x8, cpacr_el1
	mrs x9, ttbr0_el1
	mrs x10, ttbr1_el1
	mrs x11, tcr_el1
	mrs x12, esr_el1
	mrs x13, afsr0_el1
	mrs x14, afsr1_el1
	mrs x15, far_el1
	mrs x16, mair_el1
	mrs x17, vbar_el1
	mrs x18, contextidr_el1
	mrs x19, tpidr_el0
	mrs x20, tpidrro_el0
	mrs x21, tpidr_el1
	mrs x22, amair_el1
	mrs x23, cntkctl_el1

	stp x4, x5, [x3]
	stp x6, x7, [x3, #16]
	stp x8, x9, [x3, #32]
	stp x10, x11, [x3, #48]
	stp x12, x13, [x3, #64]
	stp x14, x15, [x3, #80]
	stp x16, x17, [x3, #96]
	stp x18, x19, [x3, #112]
	stp x20, x21, [x3, #128]
	stp x22, x23, [x3, #144]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp x4, x5, [x3]
	ldp x6, x7, [x3, #16]
	ldp x8, x9, [x3, #32]
	ldp x10, x11, [x3, #48]
	ldp x12, x13, [x3, #64]
	ldp x14, x15, [x3, #80]
	ldp x16, x17, [x3, #96]
	ldp x18, x19, [x3, #112]
	ldp x20, x21, [x3, #128]
	ldp x22, x23, [x3, #144]

	msr vmpidr_el2, x4
	msr csselr_el1, x5
	msr sctlr_el1, x6
	msr actlr_el1, x7
	msr cpacr_el1, x8
	msr ttbr0_el1, x9
	msr ttbr1_el1, x10
	msr tcr_el1, x11
	msr esr_el1, x12
	msr afsr0_el1, x13
	msr afsr1_el1, x14
	msr far_el1, x15
	msr mair_el1, x16
	msr vbar_el1, x17
	msr contextidr_el1, x18
	msr tpidr_el0, x19
	msr tpidrro_el0, x20
	msr tpidr_el1, x21
	msr amair_el1, x22
	msr cntkctl_el1, x23
.endm
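
/*
 * The "ordering is critical" warning above refers to the sysreg index
 * enum in asm/kvm_asm.h: CPU_SYSREG_OFFSET(MPIDR_EL1) is only the base
 * of the block, and the bulk stp/ldp sequences assume the enum assigns
 * consecutive indices in exactly the order the registers are read here,
 * i.e. (roughly) MPIDR_EL1, CSSELR_EL1, SCTLR_EL1, ACTLR_EL1, CPACR_EL1,
 * TTBR0_EL1, TTBR1_EL1, TCR_EL1, ESR_EL1, AFSR0_EL1, AFSR1_EL1, FAR_EL1,
 * MAIR_EL1, VBAR_EL1, CONTEXTIDR_EL1, TPIDR_EL0, TPIDRRO_EL0, TPIDR_EL1,
 * AMAIR_EL1, CNTKCTL_EL1, followed by the 32-bit-only registers handled
 * by {save,restore}_guest_32bit_state below.
 */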

.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs \tmp, hcr_el2
	tbnz \tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs \tmp, id_pfr0_el1
	tbz \tmp, #12, \target
.endm
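
/*
 * skip_32bit_state: HCR_EL2.RW set means the guest's EL1 is AArch64, so
 * there is no AArch32 banked state to save or restore.
 * skip_tee_state: bit 12 is assumed to be the bottom bit of the
 * ID_PFR0_EL1 ThumbEE (T32EE) field; if it is clear, TEECR/TEEHBR are
 * not implemented and the corresponding save/restore is skipped.
 */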

.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs x4, spsr_abt
	mrs x5, spsr_und
	mrs x6, spsr_irq
	mrs x7, spsr_fiq
	stp x4, x5, [x3]
	stp x6, x7, [x3, #16]

	add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs x4, dacr32_el2
	mrs x5, ifsr32_el2
	mrs x6, fpexc32_el2
	mrs x7, dbgvcr32_el2
	stp x4, x5, [x3]
	stp x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	mrs x4, teecr32_el1
	mrs x5, teehbr32_el1
	stp x4, x5, [x3]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp x4, x5, [x3]
	ldp x6, x7, [x3, #16]
	msr spsr_abt, x4
	msr spsr_und, x5
	msr spsr_irq, x6
	msr spsr_fiq, x7

	add x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp x4, x5, [x3]
	ldp x6, x7, [x3, #16]
	msr dacr32_el2, x4
	msr ifsr32_el2, x5
	msr fpexc32_el2, x6
	msr dbgvcr32_el2, x7

	skip_tee_state x8, 1f

	add x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	ldp x4, x5, [x3]
	msr teecr32_el1, x4
	msr teehbr32_el1, x5
1:
.endm

.macro activate_traps
	ldr x2, [x0, #VCPU_IRQ_LINES]
	ldr x1, [x0, #VCPU_HCR_EL2]
	orr x2, x2, x1
	msr hcr_el2, x2

	ldr x2, =(CPTR_EL2_TTA)
	msr cptr_el2, x2

	ldr x2, =(1 << 15)	// Trap CP15 Cr=15
	msr hstr_el2, x2

	mrs x2, mdcr_el2
	and x2, x2, #MDCR_EL2_HPMN_MASK
	orr x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	msr mdcr_el2, x2
.endm

.macro deactivate_traps
	mov x2, #HCR_RW
	msr hcr_el2, x2
	msr cptr_el2, xzr
	msr hstr_el2, xzr

	mrs x2, mdcr_el2
	and x2, x2, #MDCR_EL2_HPMN_MASK
	msr mdcr_el2, x2
.endm

.macro activate_vm
	ldr x1, [x0, #VCPU_KVM]
	kern_hyp_va x1
	ldr x2, [x1, #KVM_VTTBR]
	msr vttbr_el2, x2
.endm

.macro deactivate_vm
	msr vttbr_el2, xzr
.endm
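
/*
 * VTTBR_EL2 holds both the stage-2 translation table base and the VMID,
 * so activate_vm/deactivate_vm effectively switch stage-2 translation on
 * and off around the guest run. A VTTBR of zero also doubles as the
 * "we are in host context" marker used by __kvm_hyp_panic and by the
 * HVC handler further down.
 */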

/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1!!!
 */
.macro save_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va x2
	cbz x2, 2f			// disabled

	/* Compute the address of struct vgic_cpu */
	add x3, x0, #VCPU_VGIC_CPU

	/* Save all interesting registers */
	ldr w4, [x2, #GICH_HCR]
	ldr w5, [x2, #GICH_VMCR]
	ldr w6, [x2, #GICH_MISR]
	ldr w7, [x2, #GICH_EISR0]
	ldr w8, [x2, #GICH_EISR1]
	ldr w9, [x2, #GICH_ELRSR0]
	ldr w10, [x2, #GICH_ELRSR1]
	ldr w11, [x2, #GICH_APR]

	str w4, [x3, #VGIC_CPU_HCR]
	str w5, [x3, #VGIC_CPU_VMCR]
	str w6, [x3, #VGIC_CPU_MISR]
	str w7, [x3, #VGIC_CPU_EISR]
	str w8, [x3, #(VGIC_CPU_EISR + 4)]
	str w9, [x3, #VGIC_CPU_ELRSR]
	str w10, [x3, #(VGIC_CPU_ELRSR + 4)]
	str w11, [x3, #VGIC_CPU_APR]

	/* Clear GICH_HCR */
	str wzr, [x2, #GICH_HCR]

	/* Save list registers */
	add x2, x2, #GICH_LR0
	ldr w4, [x3, #VGIC_CPU_NR_LR]
	add x3, x3, #VGIC_CPU_LR
1:	ldr w5, [x2], #4
	str w5, [x3], #4
	sub w4, w4, #1
	cbnz w4, 1b
2:
.endm

/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
.macro restore_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va x2
	cbz x2, 2f			// disabled

	/* Compute the address of struct vgic_cpu */
	add x3, x0, #VCPU_VGIC_CPU

	/* We only restore a minimal set of registers */
	ldr w4, [x3, #VGIC_CPU_HCR]
	ldr w5, [x3, #VGIC_CPU_VMCR]
	ldr w6, [x3, #VGIC_CPU_APR]

	str w4, [x2, #GICH_HCR]
	str w5, [x2, #GICH_VMCR]
	str w6, [x2, #GICH_APR]

	/* Restore list registers */
	add x2, x2, #GICH_LR0
	ldr w4, [x3, #VGIC_CPU_NR_LR]
	add x3, x3, #VGIC_CPU_LR
1:	ldr w5, [x3], #4
	str w5, [x2], #4
	sub w4, w4, #1
	cbnz w4, 1b
2:
.endm

.macro save_timer_state
	// x0: vcpu pointer
	ldr x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr w3, [x2, #KVM_TIMER_ENABLED]
	cbz w3, 1f

	mrs x3, cntv_ctl_el0
	and x3, x3, #3
	str w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic x3, x3, #1		// Clear Enable
	msr cntv_ctl_el0, x3

	isb

	mrs x3, cntv_cval_el0
	str x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Allow physical timer/counter access for the host
	mrs x2, cnthctl_el2
	orr x2, x2, #3
	msr cnthctl_el2, x2

	// Clear cntvoff for the host
	msr cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs x2, cnthctl_el2
	orr x2, x2, #1
	bic x2, x2, #2
	msr cnthctl_el2, x2

	ldr x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr w3, [x2, #KVM_TIMER_ENABLED]
	cbz w3, 1f

	ldr x3, [x2, #KVM_TIMER_CNTVOFF]
	msr cntvoff_el2, x3
	ldr x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr cntv_cval_el0, x2
	isb

	ldr w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and x2, x2, #3
	msr cntv_ctl_el0, x2
1:
.endm
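
/*
 * The magic constants above decode as follows (architectural bit
 * assignments, stated here for readability):
 * - CNTV_CTL_EL0: bit 0 = ENABLE, bit 1 = IMASK, bit 2 = ISTATUS.
 *   "and x3, x3, #3" keeps only ENABLE and IMASK, and "bic x3, x3, #1"
 *   stops the virtual timer while the guest is out.
 * - CNTHCTL_EL2: bit 0 = EL1PCTEN (physical counter access from EL1/0),
 *   bit 1 = EL1PCEN (physical timer access). The host gets both; the
 *   guest only gets the counter.
 */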

__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
ENTRY(__kvm_vcpu_run)
	kern_hyp_va x0
	msr tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl __save_fpsimd
	bl __save_sysregs

	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add x2, x0, #VCPU_CONTEXT

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_guest_32bit_state
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl __save_fpsimd
	bl __save_sysregs
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_host_regs

	mov x0, x1
	ret
END(__kvm_vcpu_run)
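
/*
 * __kvm_vcpu_run is not called directly: the host issues an HVC through
 * kvm_call_hyp (defined near the bottom of this file), which the el1_sync
 * handler below turns into a branch to the hyp VA of the requested
 * function, with the remaining arguments shifted down by one register.
 * A minimal sketch of the host side, assuming the kvm_call_hyp prototype
 * from asm/kvm_host.h:
 *
 *	u64 kvm_call_hyp(void *hypfn, ...);
 *
 *	exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * The value handed back in x0 (ARM_EXCEPTION_TRAP, ARM_EXCEPTION_IRQ, ...)
 * is what the C world-switch loop dispatches on.
 */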

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	kern_hyp_va x0
	ldr x2, [x0, #KVM_VTTBR]
	msr vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	tlbi ipas2e1is, x1
	dsb sy
	tlbi vmalle1is
	dsb sy
	isb

	msr vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

ENTRY(__kvm_flush_vm_context)
	tlbi alle1is
	ic ialluis
	dsb sy
	ret
ENDPROC(__kvm_flush_vm_context)

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.
	mrs x0, vttbr_el2
	cbz x0, 1f

	mrs x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

1:	adr x0, __hyp_panic_str
	adr x1, 2f
	ldp x2, x3, [x1]
	sub x0, x0, x2
	add x0, x0, x3
	mrs x1, spsr_el2
	mrs x2, elr_el2
	mrs x3, esr_el2
	mrs x4, far_el2
	mrs x5, hpfar_el2
	mrs x6, par_el1
	mrs x7, tpidr_el2

	mov lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		  PSR_MODE_EL1h)
	msr spsr_el2, lr
	ldr lr, =panic
	msr elr_el2, lr
	eret

	.align 3
2:	.quad HYP_PAGE_OFFSET
	.quad PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)
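
/*
 * The adr/ldp/sub/add dance above converts the hyp-mode address of
 * __hyp_panic_str into its kernel linear-map alias (subtract
 * HYP_PAGE_OFFSET, add PAGE_OFFSET) so that panic(), which runs back at
 * EL1 after the eret, can still dereference the format string. The
 * spsr/elr/esr/far/hpfar/par/tpidr values gathered in x1-x7 become its
 * printf arguments.
 */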

__hyp_panic_str:
	.ascii "HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align 2

ENTRY(kvm_call_hyp)
	hvc #0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector label, target
	.align 2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector el2t_error_invalid, __kvm_hyp_panic
	invalid_vector el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector el2h_error_invalid, __kvm_hyp_panic
	invalid_vector el1_sync_invalid, __kvm_hyp_panic
	invalid_vector el1_irq_invalid, __kvm_hyp_panic
	invalid_vector el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector el1_error_invalid, __kvm_hyp_panic

el1_sync:				// Guest trapped into EL2
	push x0, x1
	push x2, x3

	mrs x1, esr_el2
	lsr x2, x1, #ESR_EL2_EC_SHIFT

	cmp x2, #ESR_EL2_EC_HVC64
	b.ne el1_trap

	mrs x3, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz x3, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop x2, x3
	pop x0, x1

	push lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va x0
	mov lr, x0
	mov x0, x1
	mov x1, x2
	mov x2, x3
	blr lr

	pop lr, xzr
	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
	cmp x2, #ESR_EL2_EC_DABT
	mov x0, #ESR_EL2_EC_IABT
	ccmp x2, x0, #4, ne
	b.ne 1f			// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and x2, x1, #ESR_EL2_FSC_TYPE
	cmp x2, #FSC_PERM
	b.ne 1f			// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz x1, #7, 1f		// S1PTW is set

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs x2, far_el2
	at s1e1r, x2
	isb

	/* Read result */
	mrs x3, par_el1
	tbnz x3, #0, 3f		// Bail out if we failed the translation
	ubfx x3, x3, #12, #36	// Extract IPA
	lsl x3, x3, #4		// and present it like HPFAR
	b 2f

1:	mrs x3, hpfar_el2
	mrs x2, far_el2

2:	mrs x0, tpidr_el2
	str x1, [x0, #VCPU_ESR_EL2]
	str x2, [x0, #VCPU_FAR_EL2]
	str x3, [x0, #VCPU_HPFAR_EL2]

	mov x1, #ARM_EXCEPTION_TRAP
	b __kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop x2, x3
	pop x0, x1

	eret
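
/*
 * The PAR_EL1 handling above relies on the architectural layout of the
 * register after an AT instruction: bit 0 is the fault flag checked by
 * the tbnz, and the output address lives in bits [47:12]. ubfx pulls out
 * PA[47:12] and the lsl #4 re-aligns it to bits [39:4], which is where
 * HPFAR_EL2 reports the faulting IPA, so the C fault-handling code can
 * treat both paths identically.
 */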

el1_irq:
	push x0, x1
	push x2, x3
	mrs x0, tpidr_el2
	mov x1, #ARM_EXCEPTION_IRQ
	b __kvm_vcpu_return

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry el2t_sync_invalid	// Synchronous EL2t
	ventry el2t_irq_invalid		// IRQ EL2t
	ventry el2t_fiq_invalid		// FIQ EL2t
	ventry el2t_error_invalid	// Error EL2t

	ventry el2h_sync_invalid	// Synchronous EL2h
	ventry el2h_irq_invalid		// IRQ EL2h
	ventry el2h_fiq_invalid		// FIQ EL2h
	ventry el2h_error_invalid	// Error EL2h

	ventry el1_sync			// Synchronous 64-bit EL1
	ventry el1_irq			// IRQ 64-bit EL1
	ventry el1_fiq_invalid		// FIQ 64-bit EL1
	ventry el1_error_invalid	// Error 64-bit EL1

	ventry el1_sync			// Synchronous 32-bit EL1
	ventry el1_irq			// IRQ 32-bit EL1
	ventry el1_fiq_invalid		// FIQ 32-bit EL1
	ventry el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)
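
/*
 * Vector table layout notes: .align 11 gives the 2KB alignment required
 * of an address written to VBAR_EL2, and the ventry helper is assumed to
 * expand to an .align 7 plus a branch, so each of the 16 entries occupies
 * 0x80 bytes, matching the architectural spacing of the exception vectors.
 */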

__kvm_hyp_code_end:
	.globl __kvm_hyp_code_end

	.popsection