/*
 * (not much of an) Emulation layer for 32bit guests.
 *
 * Copyright (C) 2012,2013 - ARM Ltd
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 *
 * based on arch/arm/kvm/emulate.c
 * Copyright (C) 2012 - Virtual Open Systems and Columbia University
 * Author: Christoffer Dall <c.dall@virtualopensystems.com>
 *
 * This program is free software: you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 */

#include <linux/bits.h>
#include <linux/kvm_host.h>
#include <asm/kvm_emulate.h>
#include <asm/kvm_hyp.h>

#define DFSR_FSC_EXTABT_LPAE	0x10
#define DFSR_FSC_EXTABT_nLPAE	0x08
#define DFSR_LPAE		BIT(9)

/*
 * Table of LR adjustments on exception entry, taken from ARMv8 ARM
 * DDI0487B-B, table G1-10. Indexed as [vect_offset >> 2][is_thumb]: the
 * first column is the offset when the exception is taken from ARM state,
 * the second when it is taken from Thumb state.
 */
static const u8 return_offsets[8][2] = {
	[0] = { 0, 0 },		/* Reset, unused */
	[1] = { 4, 2 },		/* Undefined */
	[2] = { 0, 0 },		/* SVC, unused */
	[3] = { 4, 4 },		/* Prefetch abort */
	[4] = { 8, 8 },		/* Data abort */
	[5] = { 0, 0 },		/* HVC, unused */
	[6] = { 4, 4 },		/* IRQ, unused */
	[7] = { 4, 4 },		/* FIQ, unused */
};

/*
 * When an exception is taken, most CPSR fields are left unchanged in the
 * handler. However, some are explicitly overridden (e.g. M[4:0]).
 *
 * The SPSR/SPSR_ELx layouts differ, and the below is intended to work with
 * either format. Note: SPSR.J bit doesn't exist in SPSR_ELx, but this bit was
 * obsoleted by the ARMv7 virtualization extensions and is RES0.
 *
 * For the SPSR layout seen from AArch32, see:
 * - ARM DDI 0406C.d, page B1-1148
 * - ARM DDI 0487E.a, page G8-6264
 *
 * For the SPSR_ELx layout for AArch32 seen from AArch64, see:
 * - ARM DDI 0487E.a, page C5-426
 *
 * Here we manipulate the fields in order of the AArch32 SPSR_ELx layout, from
 * MSB to LSB.
 */
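/*
 * Compute the CPSR value the guest should see on entry to the 32bit exception
 * handler for @mode: the condition flags, DIT and GE bits are preserved from
 * the current CPSR, while the mask, endianness, Thumb, PAN and SSBS bits are
 * overridden as described above.
 */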
static unsigned long get_except32_cpsr(struct kvm_vcpu *vcpu, u32 mode)
{
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);
	unsigned long old, new;

	old = *vcpu_cpsr(vcpu);
	new = 0;

	new |= (old & PSR_AA32_N_BIT);
	new |= (old & PSR_AA32_Z_BIT);
	new |= (old & PSR_AA32_C_BIT);
	new |= (old & PSR_AA32_V_BIT);
	new |= (old & PSR_AA32_Q_BIT);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See ARM DDI 0487E.a, section G1.12.3
	// See ARM DDI 0406C.d, section B1.8.3

	new |= (old & PSR_AA32_DIT_BIT);

	// CPSR.SSBS is set to SCTLR.DSSBS upon any exception
	// See ARM DDI 0487E.a, page G8-6244
	if (sctlr & BIT(31))
		new |= PSR_AA32_SSBS_BIT;

	// CPSR.PAN is unchanged unless SCTLR.SPAN == 0b0
	// SCTLR.SPAN is RES1 when ARMv8.1-PAN is not implemented
	// See ARM DDI 0487E.a, page G8-6246
	new |= (old & PSR_AA32_PAN_BIT);
	if (!(sctlr & BIT(23)))
		new |= PSR_AA32_PAN_BIT;

	// SS does not exist in AArch32, so ignore

	// CPSR.IL is set to zero upon any exception
	// See ARM DDI 0487E.a, page G1-5527

	new |= (old & PSR_AA32_GE_MASK);

	// CPSR.IT[7:0] are set to zero upon any exception
	// See prior comment above

	// CPSR.E is set to SCTLR.EE upon any exception
	// See ARM DDI 0487E.a, page G8-6245
	// See ARM DDI 0406C.d, page B4-1701
	if (sctlr & BIT(25))
		new |= PSR_AA32_E_BIT;

	// CPSR.A is unchanged upon an exception to Undefined, Supervisor
	// CPSR.A is set upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_A_BIT);
	if (mode != PSR_AA32_MODE_UND && mode != PSR_AA32_MODE_SVC)
		new |= PSR_AA32_A_BIT;

	// CPSR.I is set upon any exception
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= PSR_AA32_I_BIT;

	// CPSR.F is set upon an exception to FIQ
	// CPSR.F is unchanged upon an exception to other modes
	// See ARM DDI 0487E.a, pages G1-5515 to G1-5516
	// See ARM DDI 0406C.d, page B1-1182
	new |= (old & PSR_AA32_F_BIT);
	if (mode == PSR_AA32_MODE_FIQ)
		new |= PSR_AA32_F_BIT;

	// CPSR.T is set to SCTLR.TE upon any exception
	// See ARM DDI 0487E.a, page G8-5514
	// See ARM DDI 0406C.d, page B1-1181
	if (sctlr & BIT(30))
		new |= PSR_AA32_T_BIT;

	new |= mode;

	return new;
}

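/*
 * Switch the vcpu to @mode, saving the old CPSR to the banked SPSR and the
 * return address to the banked LR, then branch to the vector at @vect_offset
 * from the exception base (0xffff0000 when SCTLR.V selects the high vectors,
 * VBAR otherwise).
 */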
static void prepare_fault32(struct kvm_vcpu *vcpu, u32 mode, u32 vect_offset)
{
	unsigned long spsr = *vcpu_cpsr(vcpu);
	bool is_thumb = (spsr & PSR_AA32_T_BIT);
	u32 return_offset = return_offsets[vect_offset >> 2][is_thumb];
	u32 sctlr = vcpu_cp15(vcpu, c1_SCTLR);

	*vcpu_cpsr(vcpu) = get_except32_cpsr(vcpu, mode);

	/* Note: These now point to the banked copies */
	vcpu_write_spsr(vcpu, host_spsr_to_spsr32(spsr));
	*vcpu_reg32(vcpu, 14) = *vcpu_pc(vcpu) + return_offset;

	/* Branch to exception vector */
	if (sctlr & (1 << 13))
		vect_offset += 0xffff0000;
	else /* always have security exceptions */
		vect_offset += vcpu_cp15(vcpu, c12_VBAR);

	*vcpu_pc(vcpu) = vect_offset;
}
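/*
 * Inject an Undefined Instruction exception, which vectors to offset 4 of
 * the guest's exception table.
 */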
167void kvm_inject_undef32(struct kvm_vcpu *vcpu)
168{
Mark Rutland256c0962018-07-05 15:16:53 +0100169 prepare_fault32(vcpu, PSR_AA32_MODE_UND, 4);
Marc Zyngier74a64a92017-10-29 02:18:09 +0000170}
171
/*
 * Modelled after the TakeDataAbortException() and
 * TakePrefetchAbortException() pseudocode.
 */
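/*
 * Set up the banked FAR/FSR for the abort, then take the guest to the ABT
 * mode vector. The FSR uses the LPAE or short-descriptor encoding according
 * to the guest's TTBCR.EAE bit.
 */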
static void inject_abt32(struct kvm_vcpu *vcpu, bool is_pabt,
			 unsigned long addr)
{
	u32 vect_offset;
	u32 *far, *fsr;
	bool is_lpae;

	if (is_pabt) {
		vect_offset = 12;
		far = &vcpu_cp15(vcpu, c6_IFAR);
		fsr = &vcpu_cp15(vcpu, c5_IFSR);
	} else { /* !is_pabt, i.e. a data abort */
		vect_offset = 16;
		far = &vcpu_cp15(vcpu, c6_DFAR);
		fsr = &vcpu_cp15(vcpu, c5_DFSR);
	}

	prepare_fault32(vcpu, PSR_AA32_MODE_ABT, vect_offset);

	*far = addr;

	/* Give the guest an IMPLEMENTATION DEFINED exception */
	is_lpae = (vcpu_cp15(vcpu, c2_TTBCR) >> 31);
	if (is_lpae) {
		*fsr = DFSR_LPAE | DFSR_FSC_EXTABT_LPAE;
	} else {
		/* no need to shuffle FS[4] into DFSR[10] as it's 0 */
		*fsr = DFSR_FSC_EXTABT_nLPAE;
	}
}
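/* Inject an external data abort for @addr into a 32bit guest. */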
void kvm_inject_dabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, false, addr);
}

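/* Inject an external prefetch abort for @addr into a 32bit guest. */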
void kvm_inject_pabt32(struct kvm_vcpu *vcpu, unsigned long addr)
{
	inject_abt32(vcpu, true, addr);
}