/*
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/mm.h>
#include <linux/kvm_host.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_emulate.h>
#include <asm/opcodes.h>
#include <trace/events/kvm.h>

#include "trace.h"

#define VCPU_NR_MODES		6
#define VCPU_REG_OFFSET_USR	0
#define VCPU_REG_OFFSET_FIQ	1
#define VCPU_REG_OFFSET_IRQ	2
#define VCPU_REG_OFFSET_SVC	3
#define VCPU_REG_OFFSET_ABT	4
#define VCPU_REG_OFFSET_UND	5
#define REG_OFFSET(_reg) \
	(offsetof(struct kvm_regs, _reg) / sizeof(u32))

#define USR_REG_OFFSET(_num) REG_OFFSET(usr_regs.uregs[_num])

static const unsigned long vcpu_reg_offsets[VCPU_NR_MODES][15] = {
	/* USR/SYS Registers */
	[VCPU_REG_OFFSET_USR] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12), USR_REG_OFFSET(13), USR_REG_OFFSET(14),
	},

	/* FIQ Registers */
	[VCPU_REG_OFFSET_FIQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7),
		REG_OFFSET(fiq_regs[0]), /* r8 */
		REG_OFFSET(fiq_regs[1]), /* r9 */
		REG_OFFSET(fiq_regs[2]), /* r10 */
		REG_OFFSET(fiq_regs[3]), /* r11 */
		REG_OFFSET(fiq_regs[4]), /* r12 */
		REG_OFFSET(fiq_regs[5]), /* r13 */
		REG_OFFSET(fiq_regs[6]), /* r14 */
	},

	/* IRQ Registers */
	[VCPU_REG_OFFSET_IRQ] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(irq_regs[0]), /* r13 */
		REG_OFFSET(irq_regs[1]), /* r14 */
	},

	/* SVC Registers */
	[VCPU_REG_OFFSET_SVC] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(svc_regs[0]), /* r13 */
		REG_OFFSET(svc_regs[1]), /* r14 */
	},

	/* ABT Registers */
	[VCPU_REG_OFFSET_ABT] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(abt_regs[0]), /* r13 */
		REG_OFFSET(abt_regs[1]), /* r14 */
	},

	/* UND Registers */
	[VCPU_REG_OFFSET_UND] = {
		USR_REG_OFFSET(0), USR_REG_OFFSET(1), USR_REG_OFFSET(2),
		USR_REG_OFFSET(3), USR_REG_OFFSET(4), USR_REG_OFFSET(5),
		USR_REG_OFFSET(6), USR_REG_OFFSET(7), USR_REG_OFFSET(8),
		USR_REG_OFFSET(9), USR_REG_OFFSET(10), USR_REG_OFFSET(11),
		USR_REG_OFFSET(12),
		REG_OFFSET(und_regs[0]), /* r13 */
		REG_OFFSET(und_regs[1]), /* r14 */
	},
};

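/*
 * Example (a minimal sketch, not part of the original file): the table
 * above turns a (mode, register number) pair into a word offset into
 * struct kvm_regs.  Banked registers resolve to their mode-specific
 * storage, everything else to the shared user-mode copy:
 *
 *	vcpu_reg_offsets[VCPU_REG_OFFSET_FIQ][13]
 *		== offsetof(struct kvm_regs, fiq_regs[5]) / sizeof(u32)
 *	vcpu_reg_offsets[VCPU_REG_OFFSET_SVC][4]
 *		== offsetof(struct kvm_regs, usr_regs.uregs[4]) / sizeof(u32)
 */
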
/*
 * Return a pointer to the register number valid in the current mode of
 * the virtual CPU.
 */
unsigned long *vcpu_reg(struct kvm_vcpu *vcpu, u8 reg_num)
{
	unsigned long *reg_array = (unsigned long *)&vcpu->arch.ctxt.gp_regs;
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case USR_MODE...SVC_MODE:
		mode &= ~MODE32_BIT; /* 0 ... 3 */
		break;

	case ABT_MODE:
		mode = VCPU_REG_OFFSET_ABT;
		break;

	case UND_MODE:
		mode = VCPU_REG_OFFSET_UND;
		break;

	case SYSTEM_MODE:
		mode = VCPU_REG_OFFSET_USR;
		break;

	default:
		BUG();
	}

	return reg_array + vcpu_reg_offsets[mode][reg_num];
}

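/*
 * Usage sketch (illustrative only): emulation code accesses guest
 * registers through vcpu_reg() so that the banked copy for the current
 * mode is the one read or written.  For example, updating the guest's
 * link register, as the exception injection code below does:
 *
 *	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;
 */
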
/*
 * Return the SPSR for the current mode of the virtual CPU.
 */
unsigned long *vcpu_spsr(struct kvm_vcpu *vcpu)
{
	unsigned long mode = *vcpu_cpsr(vcpu) & MODE_MASK;

	switch (mode) {
	case SVC_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_SVC_spsr;
	case ABT_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_ABT_spsr;
	case UND_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_UND_spsr;
	case IRQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_IRQ_spsr;
	case FIQ_MODE:
		return &vcpu->arch.ctxt.gp_regs.KVM_ARM_FIQ_spsr;
	default:
		BUG();
	}
}

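/*
 * Example (a minimal sketch): once the guest has been switched into an
 * exception mode, *vcpu_spsr(vcpu) resolves to that mode's banked SPSR.
 * This is how kvm_update_psr() below preserves the pre-exception CPSR:
 *
 *	*vcpu_spsr(vcpu) = cpsr;
 */
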
/******************************************************************************
 * Inject exceptions into the guest
 */

static u32 exc_vector_base(struct kvm_vcpu *vcpu)
{
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
	u32 vbar = vcpu_cp15(vcpu, c12_VBAR);

	if (sctlr & SCTLR_V)
		return 0xffff0000;
	else /* always have security exceptions */
		return vbar;
}

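/*
 * Worked example (a minimal sketch): with SCTLR.V set the guest uses
 * the "high vectors" at 0xffff0000, so an undefined-instruction
 * exception (vector offset 4) branches to 0xffff0004; with SCTLR.V
 * clear it branches to VBAR + 4 instead.
 */
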
/*
 * Switch to an exception mode, updating both CPSR and SPSR.  Follow
 * the logic described in AArch32.EnterMode() from the ARMv8 ARM.
 */
static void kvm_update_psr(struct kvm_vcpu *vcpu, unsigned long mode)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	*vcpu_cpsr(vcpu) = (cpsr & ~MODE_MASK) | mode;

	switch (mode) {
	case FIQ_MODE:
		*vcpu_cpsr(vcpu) |= PSR_F_BIT;
		/* Fall through */
	case ABT_MODE:
	case IRQ_MODE:
		*vcpu_cpsr(vcpu) |= PSR_A_BIT;
		/* Fall through */
	default:
		*vcpu_cpsr(vcpu) |= PSR_I_BIT;
	}

	*vcpu_cpsr(vcpu) &= ~(PSR_IT_MASK | PSR_J_BIT | PSR_E_BIT | PSR_T_BIT);

	if (sctlr & SCTLR_TE)
		*vcpu_cpsr(vcpu) |= PSR_T_BIT;
	if (sctlr & SCTLR_EE)
		*vcpu_cpsr(vcpu) |= PSR_E_BIT;

	/* Note: These now point to the mode banked copies */
	*vcpu_spsr(vcpu) = cpsr;
}

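/*
 * Worked example (a minimal sketch, assuming a guest in ARM state):
 * kvm_update_psr(vcpu, UND_MODE) rewrites the mode bits to UND, masks
 * IRQs via PSR_I_BIT (the FIQ and asynchronous-abort masks are only
 * added for the FIQ, ABT and IRQ cases above), clears the IT/J/E/T
 * state, re-applies the T and E bits from SCTLR.TE/SCTLR.EE, and saves
 * the original CPSR in the UND-banked SPSR.
 */
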
/**
 * kvm_inject_undefined - inject an undefined exception into the guest
 * @vcpu: The VCPU to receive the undefined exception
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 *
 * Modelled after TakeUndefInstrException() pseudocode.
 */
void kvm_inject_undefined(struct kvm_vcpu *vcpu)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset = 4;
	u32 return_offset = (is_thumb) ? 2 : 4;

	kvm_update_psr(vcpu, UND_MODE);
	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) - return_offset;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;
}

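/*
 * Usage sketch (illustrative only; emulate_insn() is a hypothetical
 * helper named just for this example): a handler that cannot emulate a
 * trapped instruction would typically punt it back to the guest:
 *
 *	if (!emulate_insn(vcpu)) {
 *		kvm_inject_undefined(vcpu);
 *		return 1;
 *	}
 */
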
/*
 * Modelled after TakeDataAbortException() and TakePrefetchAbortException()
 * pseudocode.
 */
static void inject_abt(struct kvm_vcpu *vcpu, bool is_pabt, unsigned long addr)
{
	unsigned long cpsr = *vcpu_cpsr(vcpu);
	bool is_thumb = (cpsr & PSR_T_BIT);
	u32 vect_offset;
	u32 return_offset = (is_thumb) ? 4 : 0;
	bool is_lpae;

	kvm_update_psr(vcpu, ABT_MODE);
	*vcpu_reg(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	if (is_pabt)
		vect_offset = 12;
	else
		vect_offset = 16;

	/* Branch to exception vector */
	*vcpu_pc(vcpu) = exc_vector_base(vcpu) + vect_offset;

	if (is_pabt) {
		/* Set IFAR and IFSR */
		vcpu_cp15(vcpu, c6_IFAR) = addr;
		is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu_cp15(vcpu, c5_IFSR) = 1 << 9 | 0x22;
		else
			vcpu_cp15(vcpu, c5_IFSR) = 2;
	} else { /* !iabt */
		/* Set DFAR and DFSR */
		vcpu_cp15(vcpu, c6_DFAR) = addr;
		is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
		/* Always give debug fault for now - should give guest a clue */
		if (is_lpae)
			vcpu_cp15(vcpu, c5_DFSR) = 1 << 9 | 0x22;
		else
			vcpu_cp15(vcpu, c5_DFSR) = 2;
	}
}

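/*
 * Example (a minimal sketch): for an LPAE guest (TTBCR.EAE, bit 31,
 * set) the fault status written above is (1 << 9) | 0x22, i.e. the
 * long-descriptor format bit plus the 0b100010 "debug event" status
 * code; for a short-descriptor guest it is simply 2, the equivalent
 * "debug event" encoding in the short format.
 */
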
/**
 * kvm_inject_dabt - inject a data abort into the guest
 * @vcpu: The VCPU to receive the data abort
 * @addr: The address to report in the DFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, false, addr);
}

/**
 * kvm_inject_pabt - inject a prefetch abort into the guest
 * @vcpu: The VCPU to receive the prefetch abort
 * @addr: The address to report in the IFAR
 *
 * It is assumed that this code is called from the VCPU thread and that the
 * VCPU therefore is not currently executing guest code.
 */
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt(vcpu, true, addr);
}
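
/*
 * Usage sketch (illustrative only): fault handling paths report bad
 * guest addresses back to the guest through these helpers, e.g. an
 * unresolvable instruction abort:
 *
 *	kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
 */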