/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/linkage.h>

#include <asm/alternative.h>
#include <asm/asm-offsets.h>
#include <asm/assembler.h>
#include <asm/cpufeature.h>
#include <asm/debug-monitors.h>
#include <asm/esr.h>
#include <asm/fpsimdmacros.h>
#include <asm/kvm.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_asm.h>
#include <asm/kvm_mmu.h>
#include <asm/memory.h>

#define CPU_GP_REG_OFFSET(x)	(CPU_GP_REGS + x)
#define CPU_XREG_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_USER_PT_REGS + 8*x)
#define CPU_SPSR_OFFSET(x)	CPU_GP_REG_OFFSET(CPU_SPSR + 8*x)
#define CPU_SYSREG_OFFSET(x)	(CPU_SYSREGS + 8*x)
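
/*
 * For illustration: each register slot in the saved CPU context is 8 bytes,
 * so for example CPU_XREG_OFFSET(19) expands to
 * CPU_GP_REGS + CPU_USER_PT_REGS + 8*19, i.e. the offset of x19 within the
 * context that the macros below read from and write to.
 */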

	.text
	.pushsection	.hyp.text, "ax"
	.align	PAGE_SHIFT

.macro save_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_XREG_OFFSET(19)
	stp	x19, x20, [x3]
	stp	x21, x22, [x3, #16]
	stp	x23, x24, [x3, #32]
	stp	x25, x26, [x3, #48]
	stp	x27, x28, [x3, #64]
	stp	x29, lr, [x3, #80]

	mrs	x19, sp_el0
	mrs	x20, elr_el2		// pc before entering el2
	mrs	x21, spsr_el2		// pstate before entering el2

	stp	x19, x20, [x3, #96]
	str	x21, [x3, #112]

	mrs	x22, sp_el1
	mrs	x23, elr_el1
	mrs	x24, spsr_el1

	str	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	str	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	str	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]
.endm

.macro restore_common_regs
	// x2: base address for cpu context
	// x3: tmp register

	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	ldr	x23, [x2, #CPU_GP_REG_OFFSET(CPU_ELR_EL1)]
	ldr	x24, [x2, #CPU_SPSR_OFFSET(KVM_SPSR_EL1)]

	msr	sp_el1, x22
	msr	elr_el1, x23
	msr	spsr_el1, x24

	add	x3, x2, #CPU_XREG_OFFSET(31)	// SP_EL0
	ldp	x19, x20, [x3]
	ldr	x21, [x3, #16]

	msr	sp_el0, x19
	msr	elr_el2, x20		// pc on return from el2
	msr	spsr_el2, x21		// pstate on return from el2

	add	x3, x2, #CPU_XREG_OFFSET(19)
	ldp	x19, x20, [x3]
	ldp	x21, x22, [x3, #16]
	ldp	x23, x24, [x3, #32]
	ldp	x25, x26, [x3, #48]
	ldp	x27, x28, [x3, #64]
	ldp	x29, lr, [x3, #80]
.endm

.macro save_host_regs
	save_common_regs
.endm

.macro restore_host_regs
	restore_common_regs
.endm

.macro save_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_save x3, 4
.endm

.macro restore_fpsimd
	// x2: cpu context address
	// x3, x4: tmp regs
	add	x3, x2, #CPU_GP_REG_OFFSET(CPU_FP_REGS)
	fpsimd_restore x3, 4
.endm

.macro save_guest_regs
	// x0 is the vcpu address
	// x1 is the return code, do not corrupt!
	// x2 is the cpu context
	// x3 is a tmp register
	// Guest's x0-x3 are on the stack

	// Compute base to save registers
	add	x3, x2, #CPU_XREG_OFFSET(4)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	str	x18, [x3, #112]

	pop	x6, x7			// x2, x3
	pop	x4, x5			// x0, x1

	add	x3, x2, #CPU_XREG_OFFSET(0)
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	save_common_regs
.endm

.macro restore_guest_regs
	// x0 is the vcpu address.
	// x2 is the cpu context
	// x3 is a tmp register

	// Prepare x0-x3 for later restore
	add	x3, x2, #CPU_XREG_OFFSET(0)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	push	x4, x5		// Push x0-x3 on the stack
	push	x6, x7

	// x4-x18
	ldp	x4, x5, [x3, #32]
	ldp	x6, x7, [x3, #48]
	ldp	x8, x9, [x3, #64]
	ldp	x10, x11, [x3, #80]
	ldp	x12, x13, [x3, #96]
	ldp	x14, x15, [x3, #112]
	ldp	x16, x17, [x3, #128]
	ldr	x18, [x3, #144]

	// x19-x29, lr, sp*, elr*, spsr*
	restore_common_regs

	// Last bits of the 64bit state
	pop	x2, x3
	pop	x0, x1

	// Do not touch any register after this!
.endm

/*
 * Macros to perform system register save/restore.
 *
 * Ordering here is absolutely critical, and must be kept consistent
 * in {save,restore}_sysregs, {save,restore}_guest_32bit_state,
 * and in kvm_asm.h.
 *
 * In other words, don't touch any of these unless you know what
 * you are doing.
 */
.macro save_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	mrs	x4, vmpidr_el2
	mrs	x5, csselr_el1
	mrs	x6, sctlr_el1
	mrs	x7, actlr_el1
	mrs	x8, cpacr_el1
	mrs	x9, ttbr0_el1
	mrs	x10, ttbr1_el1
	mrs	x11, tcr_el1
	mrs	x12, esr_el1
	mrs	x13, afsr0_el1
	mrs	x14, afsr1_el1
	mrs	x15, far_el1
	mrs	x16, mair_el1
	mrs	x17, vbar_el1
	mrs	x18, contextidr_el1
	mrs	x19, tpidr_el0
	mrs	x20, tpidrro_el0
	mrs	x21, tpidr_el1
	mrs	x22, amair_el1
	mrs	x23, cntkctl_el1
	mrs	x24, par_el1
	mrs	x25, mdscr_el1

	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]
	stp	x8, x9, [x3, #32]
	stp	x10, x11, [x3, #48]
	stp	x12, x13, [x3, #64]
	stp	x14, x15, [x3, #80]
	stp	x16, x17, [x3, #96]
	stp	x18, x19, [x3, #112]
	stp	x20, x21, [x3, #128]
	stp	x22, x23, [x3, #144]
	stp	x24, x25, [x3, #160]
.endm

.macro save_debug type
	// x4: pointer to register set
	// x5: number of registers to skip
	// x6..x22 trashed

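	// The adr/add/br pairs below implement a computed branch: each
	// mrs/str in the lists that follow is a single 4-byte instruction,
	// so adding (x5 << 2) to the label address skips the first x5
	// registers, i.e. the ones this CPU does not implement.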
	adr	x22, 1f
	add	x22, x22, x5, lsl #2
	br	x22
1:
	mrs	x21, \type\()15_el1
	mrs	x20, \type\()14_el1
	mrs	x19, \type\()13_el1
	mrs	x18, \type\()12_el1
	mrs	x17, \type\()11_el1
	mrs	x16, \type\()10_el1
	mrs	x15, \type\()9_el1
	mrs	x14, \type\()8_el1
	mrs	x13, \type\()7_el1
	mrs	x12, \type\()6_el1
	mrs	x11, \type\()5_el1
	mrs	x10, \type\()4_el1
	mrs	x9, \type\()3_el1
	mrs	x8, \type\()2_el1
	mrs	x7, \type\()1_el1
	mrs	x6, \type\()0_el1

	adr	x22, 1f
	add	x22, x22, x5, lsl #2
	br	x22
1:
	str	x21, [x4, #(15 * 8)]
	str	x20, [x4, #(14 * 8)]
	str	x19, [x4, #(13 * 8)]
	str	x18, [x4, #(12 * 8)]
	str	x17, [x4, #(11 * 8)]
	str	x16, [x4, #(10 * 8)]
	str	x15, [x4, #(9 * 8)]
	str	x14, [x4, #(8 * 8)]
	str	x13, [x4, #(7 * 8)]
	str	x12, [x4, #(6 * 8)]
	str	x11, [x4, #(5 * 8)]
	str	x10, [x4, #(4 * 8)]
	str	x9, [x4, #(3 * 8)]
	str	x8, [x4, #(2 * 8)]
	str	x7, [x4, #(1 * 8)]
	str	x6, [x4, #(0 * 8)]
.endm

.macro restore_sysregs
	// x2: base address for cpu context
	// x3: tmp register

	add	x3, x2, #CPU_SYSREG_OFFSET(MPIDR_EL1)

	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	ldp	x8, x9, [x3, #32]
	ldp	x10, x11, [x3, #48]
	ldp	x12, x13, [x3, #64]
	ldp	x14, x15, [x3, #80]
	ldp	x16, x17, [x3, #96]
	ldp	x18, x19, [x3, #112]
	ldp	x20, x21, [x3, #128]
	ldp	x22, x23, [x3, #144]
	ldp	x24, x25, [x3, #160]

	msr	vmpidr_el2, x4
	msr	csselr_el1, x5
	msr	sctlr_el1, x6
	msr	actlr_el1, x7
	msr	cpacr_el1, x8
	msr	ttbr0_el1, x9
	msr	ttbr1_el1, x10
	msr	tcr_el1, x11
	msr	esr_el1, x12
	msr	afsr0_el1, x13
	msr	afsr1_el1, x14
	msr	far_el1, x15
	msr	mair_el1, x16
	msr	vbar_el1, x17
	msr	contextidr_el1, x18
	msr	tpidr_el0, x19
	msr	tpidrro_el0, x20
	msr	tpidr_el1, x21
	msr	amair_el1, x22
	msr	cntkctl_el1, x23
	msr	par_el1, x24
	msr	mdscr_el1, x25
.endm

.macro restore_debug type
	// x4: pointer to register set
	// x5: number of registers to skip
	// x6..x22 trashed

	adr	x22, 1f
	add	x22, x22, x5, lsl #2
	br	x22
1:
	ldr	x21, [x4, #(15 * 8)]
	ldr	x20, [x4, #(14 * 8)]
	ldr	x19, [x4, #(13 * 8)]
	ldr	x18, [x4, #(12 * 8)]
	ldr	x17, [x4, #(11 * 8)]
	ldr	x16, [x4, #(10 * 8)]
	ldr	x15, [x4, #(9 * 8)]
	ldr	x14, [x4, #(8 * 8)]
	ldr	x13, [x4, #(7 * 8)]
	ldr	x12, [x4, #(6 * 8)]
	ldr	x11, [x4, #(5 * 8)]
	ldr	x10, [x4, #(4 * 8)]
	ldr	x9, [x4, #(3 * 8)]
	ldr	x8, [x4, #(2 * 8)]
	ldr	x7, [x4, #(1 * 8)]
	ldr	x6, [x4, #(0 * 8)]

	adr	x22, 1f
	add	x22, x22, x5, lsl #2
	br	x22
1:
	msr	\type\()15_el1, x21
	msr	\type\()14_el1, x20
	msr	\type\()13_el1, x19
	msr	\type\()12_el1, x18
	msr	\type\()11_el1, x17
	msr	\type\()10_el1, x16
	msr	\type\()9_el1, x15
	msr	\type\()8_el1, x14
	msr	\type\()7_el1, x13
	msr	\type\()6_el1, x12
	msr	\type\()5_el1, x11
	msr	\type\()4_el1, x10
	msr	\type\()3_el1, x9
	msr	\type\()2_el1, x8
	msr	\type\()1_el1, x7
	msr	\type\()0_el1, x6
.endm

.macro skip_32bit_state tmp, target
	// Skip 32bit state if not needed
	mrs	\tmp, hcr_el2
	tbnz	\tmp, #HCR_RW_SHIFT, \target
.endm

.macro skip_tee_state tmp, target
	// Skip ThumbEE state if not needed
	mrs	\tmp, id_pfr0_el1
	tbz	\tmp, #12, \target
.endm

.macro skip_debug_state tmp, target
	ldr	\tmp, [x0, #VCPU_DEBUG_FLAGS]
	tbz	\tmp, #KVM_ARM64_DEBUG_DIRTY_SHIFT, \target
.endm

/*
 * Branch to target if CPTR_EL2.TFP bit is set (VFP/SIMD trapping enabled)
 */
.macro skip_fpsimd_state tmp, target
	mrs	\tmp, cptr_el2
	tbnz	\tmp, #CPTR_EL2_TFP_SHIFT, \target
.endm

.macro compute_debug_state target
	// Compute debug state: if any of KDE, MDE or KVM_ARM64_DEBUG_DIRTY
	// is set, we do a full save/restore cycle and disable trapping.
	add	x25, x0, #VCPU_CONTEXT

	// Check the state of MDSCR_EL1
	ldr	x25, [x25, #CPU_SYSREG_OFFSET(MDSCR_EL1)]
	and	x26, x25, #DBG_MDSCR_KDE
	and	x25, x25, #DBG_MDSCR_MDE
	adds	xzr, x25, x26
	b.eq	9998f		// Nothing to see there

	// If any interesting bits were set, we must set the flag
	mov	x26, #KVM_ARM64_DEBUG_DIRTY
	str	x26, [x0, #VCPU_DEBUG_FLAGS]
	b	9999f		// Don't skip restore

9998:
	// Otherwise load the flags from memory in case we recently
	// trapped
	skip_debug_state x25, \target
9999:
.endm

.macro save_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	mrs	x4, spsr_abt
	mrs	x5, spsr_und
	mrs	x6, spsr_irq
	mrs	x7, spsr_fiq
	stp	x4, x5, [x3]
	stp	x6, x7, [x3, #16]

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	mrs	x4, dacr32_el2
	mrs	x5, ifsr32_el2
	stp	x4, x5, [x3]

	skip_fpsimd_state x8, 2f
	mrs	x6, fpexc32_el2
	str	x6, [x3, #16]
2:
	skip_debug_state x8, 1f
	mrs	x7, dbgvcr32_el2
	str	x7, [x3, #24]
1:
.endm

.macro restore_guest_32bit_state
	skip_32bit_state x3, 1f

	add	x3, x2, #CPU_SPSR_OFFSET(KVM_SPSR_ABT)
	ldp	x4, x5, [x3]
	ldp	x6, x7, [x3, #16]
	msr	spsr_abt, x4
	msr	spsr_und, x5
	msr	spsr_irq, x6
	msr	spsr_fiq, x7

	add	x3, x2, #CPU_SYSREG_OFFSET(DACR32_EL2)
	ldp	x4, x5, [x3]
	msr	dacr32_el2, x4
	msr	ifsr32_el2, x5

	skip_debug_state x8, 1f
	ldr	x7, [x3, #24]
	msr	dbgvcr32_el2, x7
1:
.endm

.macro activate_traps
	ldr	x2, [x0, #VCPU_HCR_EL2]

	/*
	 * We are about to set CPTR_EL2.TFP to trap all floating point
	 * register accesses to EL2. However, the ARM ARM clearly states that
	 * traps are only taken to EL2 if the operation would not otherwise
	 * trap to EL1. Therefore, always make sure that for 32-bit guests,
	 * we set FPEXC.EN to prevent traps to EL1, when setting the TFP bit.
	 */
	tbnz	x2, #HCR_RW_SHIFT, 99f	// open code skip_32bit_state
	mov	x3, #(1 << 30)
	msr	fpexc32_el2, x3
	isb
99:
	msr	hcr_el2, x2
	mov	x2, #CPTR_EL2_TTA
	orr	x2, x2, #CPTR_EL2_TFP
	msr	cptr_el2, x2

	mov	x2, #(1 << 15)	// Trap CP15 Cr=15
	msr	hstr_el2, x2

	// Monitor Debug Config - see kvm_arm_setup_debug()
	ldr	x2, [x0, #VCPU_MDCR_EL2]
	msr	mdcr_el2, x2
.endm

.macro deactivate_traps
	mov	x2, #HCR_RW
	msr	hcr_el2, x2
	msr	hstr_el2, xzr

	mrs	x2, mdcr_el2
	and	x2, x2, #MDCR_EL2_HPMN_MASK
	msr	mdcr_el2, x2
.endm

.macro activate_vm
	ldr	x1, [x0, #VCPU_KVM]
	kern_hyp_va	x1
	ldr	x2, [x1, #KVM_VTTBR]
	msr	vttbr_el2, x2
.endm

.macro deactivate_vm
	msr	vttbr_el2, xzr
.endm

/*
 * Call into the vgic backend for state saving
 */
.macro save_vgic_state
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	bl	__save_vgic_v2_state
alternative_else
	bl	__save_vgic_v3_state
alternative_endif
	mrs	x24, hcr_el2
	mov	x25, #HCR_INT_OVERRIDE
	neg	x25, x25
	and	x24, x24, x25
	msr	hcr_el2, x24
.endm

/*
 * Call into the vgic backend for state restoring
 */
.macro restore_vgic_state
	mrs	x24, hcr_el2
	ldr	x25, [x0, #VCPU_IRQ_LINES]
	orr	x24, x24, #HCR_INT_OVERRIDE
	orr	x24, x24, x25
	msr	hcr_el2, x24
alternative_if_not ARM64_HAS_SYSREG_GIC_CPUIF
	bl	__restore_vgic_v2_state
alternative_else
	bl	__restore_vgic_v3_state
alternative_endif
.endm

.macro save_timer_state
	// x0: vcpu pointer
	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	mrs	x3, cntv_ctl_el0
	and	x3, x3, #3
	str	w3, [x0, #VCPU_TIMER_CNTV_CTL]

	isb

	mrs	x3, cntv_cval_el0
	str	x3, [x0, #VCPU_TIMER_CNTV_CVAL]

1:
	// Disable the virtual timer
	msr	cntv_ctl_el0, xzr

	// Allow physical timer/counter access for the host
	mrs	x2, cnthctl_el2
	orr	x2, x2, #3
	msr	cnthctl_el2, x2

	// Clear cntvoff for the host
	msr	cntvoff_el2, xzr
.endm

.macro restore_timer_state
	// x0: vcpu pointer
	// Disallow physical timer access for the guest
	// Physical counter access is allowed
	mrs	x2, cnthctl_el2
	orr	x2, x2, #1
	bic	x2, x2, #2
	msr	cnthctl_el2, x2

	ldr	x2, [x0, #VCPU_KVM]
	kern_hyp_va	x2
	ldr	w3, [x2, #KVM_TIMER_ENABLED]
	cbz	w3, 1f

	ldr	x3, [x2, #KVM_TIMER_CNTVOFF]
	msr	cntvoff_el2, x3
	ldr	x2, [x0, #VCPU_TIMER_CNTV_CVAL]
	msr	cntv_cval_el0, x2
	isb

	ldr	w2, [x0, #VCPU_TIMER_CNTV_CTL]
	and	x2, x2, #3
	msr	cntv_ctl_el0, x2
1:
.endm

__save_sysregs:
	save_sysregs
	ret

__restore_sysregs:
	restore_sysregs
	ret

/* Save debug state */
__save_debug:
	// x2: ptr to CPU context
	// x3: ptr to debug reg struct
	// x4/x5/x6-22/x24-26: trashed

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip

	mov	x5, x24
	add	x4, x3, #DEBUG_BCR
	save_debug dbgbcr
	add	x4, x3, #DEBUG_BVR
	save_debug dbgbvr

	mov	x5, x25
	add	x4, x3, #DEBUG_WCR
	save_debug dbgwcr
	add	x4, x3, #DEBUG_WVR
	save_debug dbgwvr

	mrs	x21, mdccint_el1
	str	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
	ret

/* Restore debug state */
__restore_debug:
	// x2: ptr to CPU context
	// x3: ptr to debug reg struct
	// x4/x5/x6-22/x24-26: trashed

	mrs	x26, id_aa64dfr0_el1
	ubfx	x24, x26, #12, #4	// Extract BRPs
	ubfx	x25, x26, #20, #4	// Extract WRPs
	mov	w26, #15
	sub	w24, w26, w24		// How many BPs to skip
	sub	w25, w26, w25		// How many WPs to skip

	mov	x5, x24
	add	x4, x3, #DEBUG_BCR
	restore_debug dbgbcr
	add	x4, x3, #DEBUG_BVR
	restore_debug dbgbvr

	mov	x5, x25
	add	x4, x3, #DEBUG_WCR
	restore_debug dbgwcr
	add	x4, x3, #DEBUG_WVR
	restore_debug dbgwvr

	ldr	x21, [x2, #CPU_SYSREG_OFFSET(MDCCINT_EL1)]
	msr	mdccint_el1, x21

	ret

__save_fpsimd:
	skip_fpsimd_state x3, 1f
	save_fpsimd
1:	ret

__restore_fpsimd:
	skip_fpsimd_state x3, 1f
	restore_fpsimd
1:	ret

switch_to_guest_fpsimd:
	push	x4, lr

	mrs	x2, cptr_el2
	bic	x2, x2, #CPTR_EL2_TFP
	msr	cptr_el2, x2
	isb

	mrs	x0, tpidr_el2

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2
	bl	__save_fpsimd

	add	x2, x0, #VCPU_CONTEXT
	bl	__restore_fpsimd

	skip_32bit_state x3, 1f
	ldr	x4, [x2, #CPU_SYSREG_OFFSET(FPEXC32_EL2)]
	msr	fpexc32_el2, x4
1:
	pop	x4, lr
	pop	x2, x3
	pop	x0, x1

	eret

/*
 * u64 __kvm_vcpu_run(struct kvm_vcpu *vcpu);
 *
 * This is the world switch. The first half of the function
 * deals with entering the guest, and anything from __kvm_vcpu_return
 * to the end of the function deals with reentering the host.
 * On the enter path, only x0 (vcpu pointer) must be preserved until
 * the last moment. On the exit path, x0 (vcpu pointer) and x1 (exception
 * code) must both be preserved until the epilogue.
 * In both cases, x2 points to the CPU context we're saving/restoring from/to.
 */
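/*
 * Rough sketch of how we get here (the host side lives outside this
 * file, in the shared arch/arm/kvm/arm.c run loop): the host issues
 * an HVC via kvm_call_hyp(), along the lines of
 *
 *	exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * which is dispatched by el1_sync below and lands here with x0
 * holding the vcpu pointer. The exit code left in x0 on return
 * travels back to the host the same way.
 */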
ENTRY(__kvm_vcpu_run)
	kern_hyp_va	x0
	msr	tpidr_el2, x0	// Save the vcpu register

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	save_host_regs
	bl	__save_sysregs

	compute_debug_state 1f
	add	x3, x0, #VCPU_HOST_DEBUG_STATE
	bl	__save_debug
1:
	activate_traps
	activate_vm

	restore_vgic_state
	restore_timer_state

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	// We must restore the 32-bit state before the sysregs, thanks
	// to Cortex-A57 erratum #852523.
	restore_guest_32bit_state
	bl	__restore_sysregs

	skip_debug_state x3, 1f
	ldr	x3, [x0, #VCPU_DEBUG_PTR]
	kern_hyp_va x3
	bl	__restore_debug
1:
	restore_guest_regs

	// That's it, no more messing around.
	eret

__kvm_vcpu_return:
	// Assume x0 is the vcpu pointer, x1 the return code
	// Guest's x0-x3 are on the stack

	// Guest context
	add	x2, x0, #VCPU_CONTEXT

	save_guest_regs
	bl	__save_fpsimd
	bl	__save_sysregs

	skip_debug_state x3, 1f
	ldr	x3, [x0, #VCPU_DEBUG_PTR]
	kern_hyp_va x3
	bl	__save_debug
1:
	save_guest_32bit_state

	save_timer_state
	save_vgic_state

	deactivate_traps
	deactivate_vm

	// Host context
	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl	__restore_sysregs
	bl	__restore_fpsimd
	/* Clear FPSIMD and Trace trapping */
	msr	cptr_el2, xzr

	skip_debug_state x3, 1f
	// Clear the dirty flag for the next run, as all the state has
	// already been saved. Note that we nuke the whole 64bit word.
	// If we ever add more flags, we'll have to be more careful...
	str	xzr, [x0, #VCPU_DEBUG_FLAGS]
	add	x3, x0, #VCPU_HOST_DEBUG_STATE
	bl	__restore_debug
1:
	restore_host_regs

	mov	x0, x1
	ret
END(__kvm_vcpu_run)

// void __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa);
ENTRY(__kvm_tlb_flush_vmid_ipa)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	lsr	x1, x1, #12
	tlbi	ipas2e1is, x1
	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb	ish
	tlbi	vmalle1is
	dsb	ish
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid_ipa)

/**
 * void __kvm_tlb_flush_vmid(struct kvm *kvm) - Flush per-VMID TLBs
 * @kvm:	pointer to kvm structure
 *
 * Invalidates all Stage 1 and 2 TLB entries for the current VMID.
 */
ENTRY(__kvm_tlb_flush_vmid)
	dsb	ishst

	kern_hyp_va	x0
	ldr	x2, [x0, #KVM_VTTBR]
	msr	vttbr_el2, x2
	isb

	tlbi	vmalls12e1is
	dsb	ish
	isb

	msr	vttbr_el2, xzr
	ret
ENDPROC(__kvm_tlb_flush_vmid)

ENTRY(__kvm_flush_vm_context)
	dsb	ishst
	tlbi	alle1is
	ic	ialluis
	dsb	ish
	ret
ENDPROC(__kvm_flush_vm_context)

__kvm_hyp_panic:
	// Stash PAR_EL1 before corrupting it in __restore_sysregs
	mrs	x0, par_el1
	push	x0, xzr

	// Guess the context by looking at VTTBR:
	// If zero, then we're already a host.
	// Otherwise restore a minimal host context before panicking.
	mrs	x0, vttbr_el2
	cbz	x0, 1f

	mrs	x0, tpidr_el2

	deactivate_traps
	deactivate_vm

	ldr	x2, [x0, #VCPU_HOST_CONTEXT]
	kern_hyp_va x2

	bl	__restore_sysregs

	/*
	 * Make sure we have a valid host stack, and don't leave junk in the
	 * frame pointer that will give us a misleading host stack unwinding.
	 */
	ldr	x22, [x2, #CPU_GP_REG_OFFSET(CPU_SP_EL1)]
	msr	sp_el1, x22
	mov	x29, xzr

1:	adr	x0, __hyp_panic_str
	adr	x1, 2f
	ldp	x2, x3, [x1]
	sub	x0, x0, x2
	add	x0, x0, x3
	mrs	x1, spsr_el2
	mrs	x2, elr_el2
	mrs	x3, esr_el2
	mrs	x4, far_el2
	mrs	x5, hpfar_el2
	pop	x6, xzr		// active context PAR_EL1
	mrs	x7, tpidr_el2

	mov	lr, #(PSR_F_BIT | PSR_I_BIT | PSR_A_BIT | PSR_D_BIT |\
		      PSR_MODE_EL1h)
	msr	spsr_el2, lr
	ldr	lr, =panic
	msr	elr_el2, lr
	eret

	.align	3
2:	.quad	HYP_PAGE_OFFSET
	.quad	PAGE_OFFSET
ENDPROC(__kvm_hyp_panic)

__hyp_panic_str:
	.ascii	"HYP panic:\nPS:%08x PC:%016x ESR:%08x\nFAR:%016x HPFAR:%016x PAR:%016x\nVCPU:%p\n\0"

	.align	2

/*
 * u64 kvm_call_hyp(void *hypfn, ...);
 *
 * This is not really a variadic function in the classic C way and care must
 * be taken when calling this to ensure parameters are passed in registers
 * only, since the stack will change between the caller and the callee.
 *
 * Call the function with the first argument containing a pointer to the
 * function you wish to call in Hyp mode, and subsequent arguments will be
 * passed as x0, x1, and x2 (a maximum of 3 arguments in addition to the
 * function pointer can be passed).  The function being called must be mapped
 * in Hyp mode (see init_hyp_mode in arch/arm/kvm/arm.c).  The return value
 * is passed back in x0.
 *
 * A function pointer with a value of 0 has a special meaning, and is
 * used to implement __hyp_get_vectors in the same way as in
 * arch/arm64/kernel/hyp_stub.S.
 */
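/*
 * A minimal usage sketch (the real call sites live in the arch KVM
 * code, outside this file), using the hyp helpers exported from here:
 *
 *	kvm_call_hyp(__kvm_flush_vm_context);
 *	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
 *	exit_code = kvm_call_hyp(__kvm_vcpu_run, vcpu);
 *
 * Each call issues an HVC that is dispatched by el1_sync below, which
 * converts the function pointer to a HYP VA and branches to it.
 */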
ENTRY(kvm_call_hyp)
	hvc	#0
	ret
ENDPROC(kvm_call_hyp)

.macro invalid_vector	label, target
	.align	2
\label:
	b \target
ENDPROC(\label)
.endm

	/* None of these should ever happen */
	invalid_vector	el2t_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2t_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2t_error_invalid, __kvm_hyp_panic
	invalid_vector	el2h_sync_invalid, __kvm_hyp_panic
	invalid_vector	el2h_irq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el2h_error_invalid, __kvm_hyp_panic
	invalid_vector	el1_sync_invalid, __kvm_hyp_panic
	invalid_vector	el1_irq_invalid, __kvm_hyp_panic
	invalid_vector	el1_fiq_invalid, __kvm_hyp_panic
	invalid_vector	el1_error_invalid, __kvm_hyp_panic

el1_sync:					// Guest trapped into EL2
	push	x0, x1
	push	x2, x3

	mrs	x1, esr_el2
	lsr	x2, x1, #ESR_ELx_EC_SHIFT

	cmp	x2, #ESR_ELx_EC_HVC64
	b.ne	el1_trap

	mrs	x3, vttbr_el2		// If vttbr is valid, the 64bit guest
	cbnz	x3, el1_trap		// called HVC

	/* Here, we're pretty sure the host called HVC. */
	pop	x2, x3
	pop	x0, x1

	/* Check for __hyp_get_vectors */
	cbnz	x0, 1f
	mrs	x0, vbar_el2
	b	2f

1:	push	lr, xzr

	/*
	 * Compute the function address in EL2, and shuffle the parameters.
	 */
	kern_hyp_va	x0
	mov	lr, x0
	mov	x0, x1
	mov	x1, x2
	mov	x2, x3
	blr	lr

	pop	lr, xzr
2:	eret

el1_trap:
	/*
	 * x1: ESR
	 * x2: ESR_EC
	 */

	/* Guest accessed VFP/SIMD registers, save host, restore Guest */
	cmp	x2, #ESR_ELx_EC_FP_ASIMD
	b.eq	switch_to_guest_fpsimd

	cmp	x2, #ESR_ELx_EC_DABT_LOW
	mov	x0, #ESR_ELx_EC_IABT_LOW
	ccmp	x2, x0, #4, ne
	b.ne	1f		// Not an abort we care about

	/* This is an abort. Check for permission fault */
alternative_if_not ARM64_WORKAROUND_834220
	and	x2, x1, #ESR_ELx_FSC_TYPE
	cmp	x2, #FSC_PERM
	b.ne	1f		// Not a permission fault
alternative_else
	nop			// Use the permission fault path to
	nop			// check for a valid S1 translation,
	nop			// regardless of the ESR value.
alternative_endif

	/*
	 * Check for Stage-1 page table walk, which is guaranteed
	 * to give a valid HPFAR_EL2.
	 */
	tbnz	x1, #7, 1f	// S1PTW is set

	/* Preserve PAR_EL1 */
	mrs	x3, par_el1
	push	x3, xzr

	/*
	 * Permission fault, HPFAR_EL2 is invalid.
	 * Resolve the IPA the hard way using the guest VA.
	 * Stage-1 translation already validated the memory access rights.
	 * As such, we can use the EL1 translation regime, and don't have
	 * to distinguish between EL0 and EL1 access.
	 */
	mrs	x2, far_el2
	at	s1e1r, x2
	isb

	/* Read result */
	mrs	x3, par_el1
	pop	x0, xzr			// Restore PAR_EL1 from the stack
	msr	par_el1, x0
	tbnz	x3, #0, 3f		// Bail out if we failed the translation
	ubfx	x3, x3, #12, #36	// Extract IPA
	lsl	x3, x3, #4		// and present it like HPFAR
	b	2f

1:	mrs	x3, hpfar_el2
	mrs	x2, far_el2

2:	mrs	x0, tpidr_el2
	str	w1, [x0, #VCPU_ESR_EL2]
	str	x2, [x0, #VCPU_FAR_EL2]
	str	x3, [x0, #VCPU_HPFAR_EL2]

	mov	x1, #ARM_EXCEPTION_TRAP
	b	__kvm_vcpu_return

	/*
	 * Translation failed. Just return to the guest and
	 * let it fault again. Another CPU is probably playing
	 * behind our back.
	 */
3:	pop	x2, x3
	pop	x0, x1

	eret

el1_irq:
	push	x0, x1
	push	x2, x3
	mrs	x0, tpidr_el2
	mov	x1, #ARM_EXCEPTION_IRQ
	b	__kvm_vcpu_return

	.ltorg

	.align 11

ENTRY(__kvm_hyp_vector)
	ventry	el2t_sync_invalid	// Synchronous EL2t
	ventry	el2t_irq_invalid	// IRQ EL2t
	ventry	el2t_fiq_invalid	// FIQ EL2t
	ventry	el2t_error_invalid	// Error EL2t

	ventry	el2h_sync_invalid	// Synchronous EL2h
	ventry	el2h_irq_invalid	// IRQ EL2h
	ventry	el2h_fiq_invalid	// FIQ EL2h
	ventry	el2h_error_invalid	// Error EL2h

	ventry	el1_sync		// Synchronous 64-bit EL1
	ventry	el1_irq			// IRQ 64-bit EL1
	ventry	el1_fiq_invalid		// FIQ 64-bit EL1
	ventry	el1_error_invalid	// Error 64-bit EL1

	ventry	el1_sync		// Synchronous 32-bit EL1
	ventry	el1_irq			// IRQ 32-bit EL1
	ventry	el1_fiq_invalid		// FIQ 32-bit EL1
	ventry	el1_error_invalid	// Error 32-bit EL1
ENDPROC(__kvm_hyp_vector)


ENTRY(__kvm_get_mdcr_el2)
	mrs	x0, mdcr_el2
	ret
ENDPROC(__kvm_get_mdcr_el2)

	.popsection