/*
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * Derived from arch/arm/include/kvm_emulate.h
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#ifndef __ARM64_KVM_EMULATE_H__
#define __ARM64_KVM_EMULATE_H__

#include <linux/kvm_host.h>

#include <asm/esr.h>
#include <asm/kvm_arm.h>
#include <asm/kvm_mmio.h>
#include <asm/ptrace.h>
#include <asm/cputype.h>

unsigned long *vcpu_reg32(const struct kvm_vcpu *vcpu, u8 reg_num);
unsigned long *vcpu_spsr32(const struct kvm_vcpu *vcpu);

bool kvm_condition_valid32(const struct kvm_vcpu *vcpu);
void kvm_skip_instr32(struct kvm_vcpu *vcpu, bool is_wide_instr);

void kvm_inject_undefined(struct kvm_vcpu *vcpu);
void kvm_inject_dabt(struct kvm_vcpu *vcpu, unsigned long addr);
void kvm_inject_pabt(struct kvm_vcpu *vcpu, unsigned long addr);

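/*
 * HCR_RW is included in HCR_GUEST_FLAGS, so it only needs to be cleared
 * again for vcpus created with the KVM_ARM_VCPU_EL1_32BIT feature, which
 * makes the guest's EL1 run in AArch32.
 */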
static inline void vcpu_reset_hcr(struct kvm_vcpu *vcpu)
{
        vcpu->arch.hcr_el2 = HCR_GUEST_FLAGS;
        if (test_bit(KVM_ARM_VCPU_EL1_32BIT, vcpu->arch.features))
                vcpu->arch.hcr_el2 &= ~HCR_RW;
}

static inline unsigned long vcpu_get_hcr(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.hcr_el2;
}

static inline void vcpu_set_hcr(struct kvm_vcpu *vcpu, unsigned long hcr)
{
        vcpu->arch.hcr_el2 = hcr;
}

static inline unsigned long *vcpu_pc(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pc;
}

static inline unsigned long *vcpu_elr_el1(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->elr_el1;
}

static inline unsigned long *vcpu_cpsr(const struct kvm_vcpu *vcpu)
{
        return (unsigned long *)&vcpu_gp_regs(vcpu)->regs.pstate;
}

static inline bool vcpu_mode_is_32bit(const struct kvm_vcpu *vcpu)
{
        return !!(*vcpu_cpsr(vcpu) & PSR_MODE32_BIT);
}

static inline bool kvm_condition_valid(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return kvm_condition_valid32(vcpu);

        return true;
}

static inline void kvm_skip_instr(struct kvm_vcpu *vcpu, bool is_wide_instr)
{
        if (vcpu_mode_is_32bit(vcpu))
                kvm_skip_instr32(vcpu, is_wide_instr);
        else
                *vcpu_pc(vcpu) += 4;
}

static inline void vcpu_set_thumb(struct kvm_vcpu *vcpu)
{
        *vcpu_cpsr(vcpu) |= COMPAT_PSR_T_BIT;
}

/*
 * vcpu_get_reg and vcpu_set_reg should always be passed a register number
 * coming from a read of ESR_EL2. Otherwise, it may give the wrong result on
 * AArch32 with banked registers.
 */
static inline unsigned long vcpu_get_reg(const struct kvm_vcpu *vcpu,
                                         u8 reg_num)
{
        return (reg_num == 31) ? 0 : vcpu_gp_regs(vcpu)->regs.regs[reg_num];
}

static inline void vcpu_set_reg(struct kvm_vcpu *vcpu, u8 reg_num,
                                unsigned long val)
{
        if (reg_num != 31)
                vcpu_gp_regs(vcpu)->regs.regs[reg_num] = val;
}
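
/*
 * A minimal usage sketch, assuming a typical trapped-access emulation path:
 * the transfer register number is decoded from ESR_EL2 (e.g. via
 * kvm_vcpu_dabt_get_rd() below) and the data is then moved with the
 * accessors above:
 *
 *      int rt = kvm_vcpu_dabt_get_rd(vcpu);
 *      data = vcpu_get_reg(vcpu, rt);        guest store: register to device
 *      vcpu_set_reg(vcpu, rt, data);         guest load: device to register
 */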

/* Get vcpu SPSR for current mode */
static inline unsigned long *vcpu_spsr(const struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return vcpu_spsr32(vcpu);

        return (unsigned long *)&vcpu_gp_regs(vcpu)->spsr[KVM_SPSR_EL1];
}

static inline bool vcpu_mode_priv(const struct kvm_vcpu *vcpu)
{
        u32 mode;

        if (vcpu_mode_is_32bit(vcpu)) {
                mode = *vcpu_cpsr(vcpu) & COMPAT_PSR_MODE_MASK;
                return mode > COMPAT_PSR_MODE_USR;
        }

        mode = *vcpu_cpsr(vcpu) & PSR_MODE_MASK;

        return mode != PSR_MODE_EL0t;
}

static inline u32 kvm_vcpu_get_hsr(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.esr_el2;
}

static inline unsigned long kvm_vcpu_get_hfar(const struct kvm_vcpu *vcpu)
{
        return vcpu->arch.fault.far_el2;
}

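/*
 * HPFAR_EL2.FIPA holds bits [47:12] of the faulting IPA, starting at bit 4
 * of the register; masking out the low nibble and shifting left by 8 thus
 * rebuilds the page-aligned intermediate physical address.
 */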
static inline phys_addr_t kvm_vcpu_get_fault_ipa(const struct kvm_vcpu *vcpu)
{
        return ((phys_addr_t)vcpu->arch.fault.hpfar_el2 & HPFAR_MASK) << 8;
}

static inline u32 kvm_vcpu_hvc_get_imm(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_xVC_IMM_MASK;
}

static inline bool kvm_vcpu_dabt_isvalid(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_ISV);
}

static inline bool kvm_vcpu_dabt_iswrite(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_WNR);
}

static inline bool kvm_vcpu_dabt_issext(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SSE);
}

static inline int kvm_vcpu_dabt_get_rd(const struct kvm_vcpu *vcpu)
{
        return (kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SRT_MASK) >> ESR_ELx_SRT_SHIFT;
}

static inline bool kvm_vcpu_dabt_isextabt(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_EA);
}

static inline bool kvm_vcpu_dabt_iss1tw(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_S1PTW);
}

static inline bool kvm_vcpu_dabt_is_cm(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_CM);
}

static inline int kvm_vcpu_dabt_get_as(const struct kvm_vcpu *vcpu)
{
        return 1 << ((kvm_vcpu_get_hsr(vcpu) & ESR_ELx_SAS) >> ESR_ELx_SAS_SHIFT);
}

/* This one is not specific to Data Abort */
static inline bool kvm_vcpu_trap_il_is32bit(const struct kvm_vcpu *vcpu)
{
        return !!(kvm_vcpu_get_hsr(vcpu) & ESR_ELx_IL);
}

static inline u8 kvm_vcpu_trap_get_class(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) >> ESR_ELx_EC_SHIFT;
}

static inline bool kvm_vcpu_trap_is_iabt(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_trap_get_class(vcpu) == ESR_ELx_EC_IABT_LOW;
}

static inline u8 kvm_vcpu_trap_get_fault(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC;
}

static inline u8 kvm_vcpu_trap_get_fault_type(const struct kvm_vcpu *vcpu)
{
        return kvm_vcpu_get_hsr(vcpu) & ESR_ELx_FSC_TYPE;
}

static inline unsigned long kvm_vcpu_get_mpidr_aff(struct kvm_vcpu *vcpu)
{
        return vcpu_sys_reg(vcpu, MPIDR_EL1) & MPIDR_HWID_BITMASK;
}

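/*
 * Bit 25 of SCTLR_EL1 is the EE bit (endianness of data accesses at EL1);
 * COMPAT_PSR_E_BIT is the AArch32 CPSR.E bit. The two helpers below use
 * whichever of the two matches the vcpu's current execution state.
 */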
static inline void kvm_vcpu_set_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                *vcpu_cpsr(vcpu) |= COMPAT_PSR_E_BIT;
        else
                vcpu_sys_reg(vcpu, SCTLR_EL1) |= (1 << 25);
}

static inline bool kvm_vcpu_is_be(struct kvm_vcpu *vcpu)
{
        if (vcpu_mode_is_32bit(vcpu))
                return !!(*vcpu_cpsr(vcpu) & COMPAT_PSR_E_BIT);

        return !!(vcpu_sys_reg(vcpu, SCTLR_EL1) & (1 << 25));
}

static inline unsigned long vcpu_data_guest_to_host(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return be16_to_cpu(data & 0xffff);
                case 4:
                        return be32_to_cpu(data & 0xffffffff);
                default:
                        return be64_to_cpu(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return le16_to_cpu(data & 0xffff);
                case 4:
                        return le32_to_cpu(data & 0xffffffff);
                default:
                        return le64_to_cpu(data);
                }
        }

        return data;        /* Leave LE untouched */
}

static inline unsigned long vcpu_data_host_to_guest(struct kvm_vcpu *vcpu,
                                                    unsigned long data,
                                                    unsigned int len)
{
        if (kvm_vcpu_is_be(vcpu)) {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_be16(data & 0xffff);
                case 4:
                        return cpu_to_be32(data & 0xffffffff);
                default:
                        return cpu_to_be64(data);
                }
        } else {
                switch (len) {
                case 1:
                        return data & 0xff;
                case 2:
                        return cpu_to_le16(data & 0xffff);
                case 4:
                        return cpu_to_le32(data & 0xffffffff);
                default:
                        return cpu_to_le64(data);
                }
        }

        return data;        /* Leave LE untouched */
}

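/*
 * A minimal usage sketch, assuming a typical MMIO emulation flow: the
 * conversion is applied once in each direction, keyed off the guest's
 * current data endianness (kvm_vcpu_is_be()):
 *
 *      guest store: buf = vcpu_data_guest_to_host(vcpu,
 *                              vcpu_get_reg(vcpu, rt), len);
 *      guest load:  vcpu_set_reg(vcpu, rt,
 *                              vcpu_data_host_to_guest(vcpu, buf, len));
 */
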
#endif /* __ARM64_KVM_EMULATE_H__ */