/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

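/*
 * Offsets into the CPU context structure. The CPU_* constants come from
 * asm-offsets.c; GP registers, SPSRs and system registers are arrays of
 * 64bit (8 byte) entries, hence the 8*x scaling.
 */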
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

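/*
 * Save the registers common to host and guest world switches: the
 * callee-saved GP registers, SP_EL0 and the EL1 state. While running at
 * EL2, the EL1 PC and pstate of the interrupted context live in
 * ELR_EL2/SPSR_EL2, which is where they are read from.
 */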
.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

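/*
 * Note: CPU_XREG_OFFSET(31) addresses the sp/pc/pstate slots that follow
 * the x0-x30 array in struct user_pt_regs, holding SP_EL0, the EL1 PC
 * and the EL1 pstate respectively.
 */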
.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20		// EL1 PC
	msr	spsr_el2, x21		// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm

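/*
 * The guest's x0-x3 have already been pushed on the HYP stack by the
 * vector entry code (see el1_sync/el1_irq below), which is why they are
 * popped rather than read from registers here.
 */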
.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5			// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
.endm

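/*
 * Route guest operations to EL2: HCR_EL2 takes the vcpu's HCR value
 * merged with the pending virtual interrupt lines, CPTR_EL2 traps trace
 * register accesses, HSTR_EL2 traps CP15 c15 accesses, and MDCR_EL2
 * traps the performance monitor registers while preserving HPMN.
 */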
.macro activate_traps
	ldr	x2, [x0, #VCPU_IRQ_LINES]
	ldr	x1, [x0, #VCPU_HCR_EL2]
	orr	x2, x2, x1
	msr	hcr_el2, x2

	ldr	x2, =(CPTR_EL2_TTA)
	msr	cptr_el2, x2

	ldr	x2, =(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	msr	mdcr_el2, x2
.endm

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	cptr_el2, xzr
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

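/*
 * VTTBR_EL2 holds both the VMID and the stage-2 translation table base
 * for the current guest. A zero VTTBR means "no guest", which both
 * el1_sync and __kvm_hyp_panic below rely on to tell host from guest.
 */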
.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm

/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1!!!
 */
.macro save_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* Save all interesting registers */
	ldr	w4, [x2, #GICH_HCR]
	ldr	w5, [x2, #GICH_VMCR]
	ldr	w6, [x2, #GICH_MISR]
	ldr	w7, [x2, #GICH_EISR0]
	ldr	w8, [x2, #GICH_EISR1]
	ldr	w9, [x2, #GICH_ELRSR0]
	ldr	w10, [x2, #GICH_ELRSR1]
	ldr	w11, [x2, #GICH_APR]

	str	w4, [x3, #VGIC_CPU_HCR]
	str	w5, [x3, #VGIC_CPU_VMCR]
	str	w6, [x3, #VGIC_CPU_MISR]
	str	w7, [x3, #VGIC_CPU_EISR]
	str	w8, [x3, #(VGIC_CPU_EISR + 4)]
	str	w9, [x3, #VGIC_CPU_ELRSR]
	str	w10, [x3, #(VGIC_CPU_ELRSR + 4)]
	str	w11, [x3, #VGIC_CPU_APR]

	/* Clear GICH_HCR */
	str	wzr, [x2, #GICH_HCR]

	/* Save list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
1:	ldr	w5, [x2], #4
	str	w5, [x3], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
.endm

/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
.macro restore_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* We only restore a minimal set of registers */
	ldr	w4, [x3, #VGIC_CPU_HCR]
	ldr	w5, [x3, #VGIC_CPU_VMCR]
	ldr	w6, [x3, #VGIC_CPU_APR]

	str	w4, [x2, #GICH_HCR]
	str	w5, [x2, #GICH_VMCR]
	str	w6, [x2, #GICH_APR]

	/* Restore list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
1:	ldr	w5, [x3], #4
	str	w5, [x2], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
.endm

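/*
 * Function wrappers around the macros above, so the entry and exit
 * paths can share them via "bl".
 */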
__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl	__save_fpsimd
	bl	__save_sysregs

	activate_traps
	activate_vm

	restore_vgic_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	bl	__restore_sysregs
	bl	__restore_fpsimd
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl	__save_fpsimd
	bl	__save_sysregs

	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl	__restore_sysregs
	bl	__restore_fpsimd
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
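/*
 * TLB invalidation by IPA is tagged with the VMID in VTTBR_EL2, so the
 * target VM's VTTBR is installed for the duration of the invalidation
 * and set back to zero ("no guest") on the way out.
 */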
ENTRY(__kvm_tlb_flush_vmid_ipa)
	kern_hyp_va x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	tlbi	ipas2e1is, x1
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

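/*
 * Flush the whole VM context: all stage-1 and stage-2 TLB entries for
 * every VMID, plus the entire instruction cache, Inner Shareable.
 */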
ENTRY(__kvm_flush_vm_context)
	tlbi	alle1is
	ic	ialluis
	dsb	sy
	ret
ENDPROC(__kvm_flush_vm_context)

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl	__restore_sysregs

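	/*
	 * The panic string is mapped at a different address in HYP than in
	 * the kernel proper, and panic() runs back at EL1: rebase its
	 * address from HYP_PAGE_OFFSET to PAGE_OFFSET before the eret.
	 */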
1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2

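/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * Call a HYP function from the host. The HVC lands in el1_sync below,
 * which converts the function pointer in x0 to a HYP VA, shuffles up to
 * three arguments from x1-x3 into x0-x2, and branches to it.
 */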
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

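/*
 * Synchronous exceptions taken to EL2 from a lower exception level land
 * here. Two cases are distinguished: the host issuing an HVC via
 * kvm_call_hyp (VTTBR_EL2 is zero), and a guest trapping into EL2.
 */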
el1_sync:					// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap			// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
	b.ne	1f			// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f			// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f		// S1PTW is set

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	x1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

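/*
 * The EL2 exception vectors. The architecture requires VBAR_EL2 to be
 * 2kB-aligned, hence the .align 11.
 */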
	.align	11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2h_error_invalid	// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error_invalid	// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.popsection