/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>
#include <linux/irqchip/arm-gic.h>

#include <asm/assembler.h>
#include <asm/memory.h>
#include <asm/asm-offsets.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmu.h>

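/*
 * Helpers turning a register index into a byte offset within the CPU
 * context (struct kvm_cpu_context, via the asm-offsets constants):
 * CPU_XREG_OFFSET(n) addresses general purpose register xn,
 * CPU_SPSR_OFFSET(n) one of the banked SPSRs, and CPU_SYSREG_OFFSET(n)
 * one of the saved EL1 system registers.
 */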
#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)

	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

__kvm_hyp_code_start:
	.globl __kvm_hyp_code_start

.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// EL1 PC
	mrs	x21, spsr_el2		// EL1 pstate

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20		// EL1 PC
	msr	spsr_el2, x21		// EL1 pstate

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1
	mrs	x24, par_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	str	x24, [x3, #160]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldr	x24, [x3, #160]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
	msr	par_el1, x24
.endm

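/*
 * A guest running with HCR_EL2.RW set is a 64bit guest and has no AArch32
 * banked state to save or restore. Likewise, the ThumbEE registers only
 * need handling when ID_PFR0 advertises ThumbEE support.
 */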
.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm

.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs	x4, spsr_abt
	mrs	x5, spsr_und
	mrs	x6, spsr_irq
	mrs	x7, spsr_fiq
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs	x4, dacr32_el2
	mrs	x5, ifsr32_el2
	mrs	x6, fpexc32_el2
	mrs	x7, dbgvcr32_el2
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	mrs	x4, teecr32_el1
	mrs	x5, teehbr32_el1
	stp	x4, x5, [x3]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	spsr_abt, x4
	msr	spsr_und, x5
	msr	spsr_irq, x6
	msr	spsr_fiq, x7

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	dacr32_el2, x4
	msr	ifsr32_el2, x5
	msr	fpexc32_el2, x6
	msr	dbgvcr32_el2, x7

	skip_tee_state x8, 1f

	add	x3, x2, #CPU_SYSREG_OFFSET(TEECR32_EL1)
	ldp	x4, x5, [x3]
	msr	teecr32_el1, x4
	msr	teehbr32_el1, x5
1:
.endm

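/*
 * Configure the EL2 trap controls for running the guest: HCR_EL2 is built
 * from the vcpu's HCR value and its pending virtual interrupt lines, trace
 * register accesses are trapped via CPTR_EL2.TTA, CP15 Cr=15 accesses via
 * HSTR_EL2, and the performance monitor registers via MDCR_EL2.
 * deactivate_traps undoes all of this on the way back to the host.
 */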
.macro activate_traps
	ldr	x2, [x0, #VCPU_IRQ_LINES]
	ldr	x1, [x0, #VCPU_HCR_EL2]
	orr	x2, x2, x1
	msr	hcr_el2, x2

	ldr	x2, =(CPTR_EL2_TTA)
	msr	cptr_el2, x2

	ldr	x2, =(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	orr	x2, x2, #(MDCR_EL2_TPM | MDCR_EL2_TPMCR)
	msr	mdcr_el2, x2
.endm

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	cptr_el2, xzr
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

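/*
 * activate_vm points VTTBR_EL2 at the VM's stage-2 page tables (which also
 * installs its VMID); deactivate_vm clears it again on the way back to the
 * host.
 */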
.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm

/*
 * Save the VGIC CPU state into memory
 * x0: Register pointing to VCPU struct
 * Do not corrupt x1!!!
 */
.macro save_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* Save all interesting registers */
	ldr	w4, [x2, #GICH_HCR]
	ldr	w5, [x2, #GICH_VMCR]
	ldr	w6, [x2, #GICH_MISR]
	ldr	w7, [x2, #GICH_EISR0]
	ldr	w8, [x2, #GICH_EISR1]
	ldr	w9, [x2, #GICH_ELRSR0]
	ldr	w10, [x2, #GICH_ELRSR1]
	ldr	w11, [x2, #GICH_APR]
CPU_BE(	rev	w4,  w4  )
CPU_BE(	rev	w5,  w5  )
CPU_BE(	rev	w6,  w6  )
CPU_BE(	rev	w7,  w7  )
CPU_BE(	rev	w8,  w8  )
CPU_BE(	rev	w9,  w9  )
CPU_BE(	rev	w10, w10 )
CPU_BE(	rev	w11, w11 )

	str	w4, [x3, #VGIC_CPU_HCR]
	str	w5, [x3, #VGIC_CPU_VMCR]
	str	w6, [x3, #VGIC_CPU_MISR]
	str	w7, [x3, #VGIC_CPU_EISR]
	str	w8, [x3, #(VGIC_CPU_EISR + 4)]
	str	w9, [x3, #VGIC_CPU_ELRSR]
	str	w10, [x3, #(VGIC_CPU_ELRSR + 4)]
	str	w11, [x3, #VGIC_CPU_APR]

	/* Clear GICH_HCR */
	str	wzr, [x2, #GICH_HCR]

	/* Save list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
1:	ldr	w5, [x2], #4
CPU_BE(	rev	w5, w5 )
	str	w5, [x3], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
.endm

/*
 * Restore the VGIC CPU state from memory
 * x0: Register pointing to VCPU struct
 */
.macro restore_vgic_state
	/* Get VGIC VCTRL base into x2 */
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	x2, [x2, #KVM_VGIC_VCTRL]
	kern_hyp_va	x2
	cbz	x2, 2f		// disabled

	/* Compute the address of struct vgic_cpu */
	add	x3, x0, #VCPU_VGIC_CPU

	/* We only restore a minimal set of registers */
	ldr	w4, [x3, #VGIC_CPU_HCR]
	ldr	w5, [x3, #VGIC_CPU_VMCR]
	ldr	w6, [x3, #VGIC_CPU_APR]
CPU_BE(	rev	w4,  w4  )
CPU_BE(	rev	w5,  w5  )
CPU_BE(	rev	w6,  w6  )

	str	w4, [x2, #GICH_HCR]
	str	w5, [x2, #GICH_VMCR]
	str	w6, [x2, #GICH_APR]

	/* Restore list registers */
	add	x2, x2, #GICH_LR0
	ldr	w4, [x3, #VGIC_CPU_NR_LR]
	add	x3, x3, #VGIC_CPU_LR
1:	ldr	w5, [x3], #4
CPU_BE(	rev	w5, w5 )
	str	w5, [x2], #4
	sub	w4, w4, #1
	cbnz	w4, 1b
2:
.endm

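/*
 * Virtual timer save/restore: the guest's CNTV control and compare values
 * are only preserved when the in-kernel timer is enabled, and CNTHCTL_EL2
 * is switched between the host view (full physical timer/counter access)
 * and the guest view (physical counter only).
 */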
.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	and	x3, x3, #3
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]
	bic	x3, x3, #1		// Clear Enable
	msr	cntv_ctl_el0, x3

	isb

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and	x2, x2, #3
	msr	cntv_ctl_el0, x2
1:
.endm

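/*
 * Thin wrappers around the macros above so that the entry and return paths
 * can reach them with a simple bl/ret pair.
 */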
__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

__save_fpsimd:
	save_fpsimd
	ret

__restore_fpsimd:
	restore_fpsimd
	ret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
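/*
 * Note: the host side typically enters here through
 * kvm_call_hyp(__kvm_vcpu_run, vcpu), and the exception code returned in x0
 * (ARM_EXCEPTION_TRAP, ARM_EXCEPTION_IRQ) tells the host why the guest
 * exited.
 */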
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl __save_fpsimd
	bl __save_sysregs

	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_guest_32bit_state
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl __save_fpsimd
	bl __save_sysregs
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs
	bl __restore_fpsimd
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
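/*
 * TLB maintenance for a guest is tagged with its VMID, so the VM's VTTBR is
 * installed for the duration of the invalidation and cleared afterwards.
 */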
ENTRY(__kvm_tlb_flush_vmid_ipa)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	tlbi	ipas2e1is, x1
	dsb	sy
	tlbi	vmalle1is
	dsb	sy
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

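/*
 * Invalidate the guest TLB state for all VMIDs and the icache across the
 * Inner Shareable domain; used for instance when the VMID generation rolls
 * over.
 */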
ENTRY(__kvm_flush_vm_context)
	dsb	ishst
	tlbi	alle1is
	ic	ialluis
	dsb	sy
	ret
ENDPROC(__kvm_flush_vm_context)

__kvm_hyp_panic:
	// Guess the context by looking at VTTBR:
	// If zero, then we're already in host context.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl __restore_sysregs

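	// Convert the HYP VA of the panic string into its kernel linear
	// mapping address, since panic() will run at EL1 with the kernel's
	// own view of memory.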
1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	mrs	x6, par_el1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%p ESR:%p\nFAR:%p HPFAR:%p PAR:%p\nVCPU:%p\n\0"

	.align	2

/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C-way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed). The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c). The return value is
 * passed back in x0.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
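/*
 * Illustrative callers (a sketch of the convention described above, not an
 * exhaustive list):
 *
 *	exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *	kvm_call_hyp(__kvm_flush_vm_context);
 */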
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:					// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_EL2_EC_SHIFT

	cmp	x2, #ESR_EL2_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2			// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap			// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	/* Check for __hyp_get_vectors */
	cbnz	x0, 1f
	mrs	x0, vbar_el2
	b	2f

1:	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
2:	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */
	cmp	x2, #ESR_EL2_EC_DABT
	mov	x0, #ESR_EL2_EC_IABT
	ccmp	x2, x0, #4, ne
	b.ne	1f			// Not an abort we care about

	/* This is an abort. Check for permission fault */
	and	x2, x1, #ESR_EL2_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f			// Not a permission fault

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f		// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	x1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

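	// The EL2 vector table must be 2kB aligned (VBAR_EL2 bits [10:0] are
	// RES0), hence the .align 11 below.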
	.align	11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid		// Synchronous EL2t
	ventry	el2t_irq_invalid		// IRQ EL2t
	ventry	el2t_fiq_invalid		// FIQ EL2t
	ventry	el2t_error_invalid		// Error EL2t

	ventry	el2h_sync_invalid		// Synchronous EL2h
	ventry	el2h_irq_invalid		// IRQ EL2h
	ventry	el2h_fiq_invalid		// FIQ EL2h
	ventry	el2h_error_invalid		// Error EL2h

	ventry	el1_sync			// Synchronous 64-bit EL1
	ventry	el1_irq				// IRQ 64-bit EL1
	ventry	el1_fiq_invalid			// FIQ 64-bit EL1
	ventry	el1_error_invalid		// Error 64-bit EL1

	ventry	el1_sync			// Synchronous 32-bit EL1
	ventry	el1_irq				// IRQ 32-bit EL1
	ventry	el1_fiq_invalid			// FIQ 32-bit EL1
	ventry	el1_error_invalid		// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)

__kvm_hyp_code_end:
	.globl	__kvm_hyp_code_end

	.popsection