/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/kvm_book3s.h>
#include <asm/reg.h>
#include <asm/switch_to.h>
#include <asm/time.h>
#include <asm/tm.h>
#include "book3s.h"
#include <asm/asm-prototypes.h>

#define OP_19_XOP_RFID		18
#define OP_19_XOP_RFI		50

#define OP_31_XOP_MFMSR		83
#define OP_31_XOP_MTMSR		146
#define OP_31_XOP_MTMSRD	178
#define OP_31_XOP_MTSR		210
#define OP_31_XOP_MTSRIN	242
#define OP_31_XOP_TLBIEL	274
#define OP_31_XOP_TLBIE		306
/* Opcode is officially reserved, reuse it as sc 1 when sc 1 doesn't trap */
#define OP_31_XOP_FAKE_SC1	308
#define OP_31_XOP_SLBMTE	402
#define OP_31_XOP_SLBIE		434
#define OP_31_XOP_SLBIA		498
#define OP_31_XOP_MFSR		595
#define OP_31_XOP_MFSRIN	659
#define OP_31_XOP_DCBA		758
#define OP_31_XOP_SLBMFEV	851
#define OP_31_XOP_EIOIO		854
#define OP_31_XOP_SLBMFEE	915

#define OP_31_XOP_TBEGIN	654
#define OP_31_XOP_TABORT	910

#define OP_31_XOP_TRECLAIM	942
#define OP_31_XOP_TRCHKPT	1006

/* DCBZ is actually 1014, but we patch it to 1010 so we get a trap */
#define OP_31_XOP_DCBZ		1010

#define OP_LFS			48
#define OP_LFD			50
#define OP_STFS			52
#define OP_STFD			54

#define SPRN_GQR0		912
#define SPRN_GQR1		913
#define SPRN_GQR2		914
#define SPRN_GQR3		915
#define SPRN_GQR4		916
#define SPRN_GQR5		917
#define SPRN_GQR6		918
#define SPRN_GQR7		919

/* Book3S_32 defines mfsrin(v) - but that messes up our abstract
 * function pointers, so let's just disable the define. */
#undef mfsrin

enum priv_level {
	PRIV_PROBLEM = 0,
	PRIV_SUPER = 1,
	PRIV_HYPER = 2,
};

static bool spr_allowed(struct kvm_vcpu *vcpu, enum priv_level level)
{
	/* PAPR VMs only access supervisor SPRs */
	if (vcpu->arch.papr_enabled && (level > PRIV_SUPER))
		return false;

	/* Limit user space to its own small SPR set */
	if ((kvmppc_get_msr(vcpu) & MSR_PR) && level > PRIV_PROBLEM)
		return false;

	return true;
}

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
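/*
 * Helpers that mirror the vcpu register state into and out of the
 * checkpointed (_tm) copies: kvmppc_copyto_vcpu_tm() snapshots the GPRs,
 * FP/VEC state and the related SPRs into the *_tm fields, and
 * kvmppc_copyfrom_vcpu_tm() restores them, as used by the treclaim. and
 * trecheckpoint. emulation below.
 */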
static inline void kvmppc_copyto_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.gpr_tm[0], &vcpu->arch.regs.gpr[0],
			sizeof(vcpu->arch.gpr_tm));
	memcpy(&vcpu->arch.fp_tm, &vcpu->arch.fp,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr_tm, &vcpu->arch.vr,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr_tm = vcpu->arch.ppr;
	vcpu->arch.dscr_tm = vcpu->arch.dscr;
	vcpu->arch.amr_tm = vcpu->arch.amr;
	vcpu->arch.ctr_tm = vcpu->arch.regs.ctr;
	vcpu->arch.tar_tm = vcpu->arch.tar;
	vcpu->arch.lr_tm = vcpu->arch.regs.link;
	vcpu->arch.cr_tm = vcpu->arch.cr;
	vcpu->arch.xer_tm = vcpu->arch.regs.xer;
	vcpu->arch.vrsave_tm = vcpu->arch.vrsave;
}

static inline void kvmppc_copyfrom_vcpu_tm(struct kvm_vcpu *vcpu)
{
	memcpy(&vcpu->arch.regs.gpr[0], &vcpu->arch.gpr_tm[0],
			sizeof(vcpu->arch.regs.gpr));
	memcpy(&vcpu->arch.fp, &vcpu->arch.fp_tm,
			sizeof(struct thread_fp_state));
	memcpy(&vcpu->arch.vr, &vcpu->arch.vr_tm,
			sizeof(struct thread_vr_state));
	vcpu->arch.ppr = vcpu->arch.ppr_tm;
	vcpu->arch.dscr = vcpu->arch.dscr_tm;
	vcpu->arch.amr = vcpu->arch.amr_tm;
	vcpu->arch.regs.ctr = vcpu->arch.ctr_tm;
	vcpu->arch.tar = vcpu->arch.tar_tm;
	vcpu->arch.regs.link = vcpu->arch.lr_tm;
	vcpu->arch.cr = vcpu->arch.cr_tm;
	vcpu->arch.regs.xer = vcpu->arch.xer_tm;
	vcpu->arch.vrsave = vcpu->arch.vrsave_tm;
}

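/*
 * Emulate treclaim. for a PR guest: reflect MSR[TS] into CR0, pull the
 * checkpointed register state back into the live vcpu registers, record
 * the failure cause in TEXASR/TFIAR if no failure has been recorded yet,
 * and drop the guest MSR back to non-transactional state.
 */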
static void kvmppc_emulate_treclaim(struct kvm_vcpu *vcpu, int ra_val)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);
	int fc_val = ra_val ? ra_val : 1;

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	preempt_disable();
	kvmppc_save_tm_pr(vcpu);
	kvmppc_copyfrom_vcpu_tm(vcpu);

	tm_enable();
	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/* failure recording depends on the Failure Summary bit */
	if (!(vcpu->arch.texasr & TEXASR_FS)) {
		vcpu->arch.texasr &= ~TEXASR_FC;
		vcpu->arch.texasr |= ((u64)fc_val << TEXASR_FC_LG);

		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (kvmppc_get_msr(vcpu) & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (kvmppc_get_msr(vcpu) & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, vcpu->arch.texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	/*
	 * treclaim needs to exit to non-transactional state.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	kvmppc_set_msr(vcpu, guest_msr);
	preempt_enable();

	if (vcpu->arch.shadow_fscr & FSCR_TAR)
		mtspr(SPRN_TAR, vcpu->arch.tar);
}

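/*
 * Emulate trecheckpoint.: flush the live FP/VEC/VSX and TAR state, copy
 * the current register set into the checkpointed (_tm) copies, mark the
 * guest MSR as transaction-suspended and reload the transactional state.
 */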
static void kvmppc_emulate_trchkpt(struct kvm_vcpu *vcpu)
{
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	/*
	 * We need to flush FP/VEC/VSX to the vcpu save area before
	 * copying.
	 */
	kvmppc_giveup_ext(vcpu, MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	kvmppc_copyto_vcpu_tm(vcpu);
	kvmppc_save_tm_sprs(vcpu);

	/*
	 * As a result of trecheckpoint, set TS to suspended.
	 */
	guest_msr &= ~(MSR_TS_MASK);
	guest_msr |= MSR_TS_S;
	kvmppc_set_msr(vcpu, guest_msr);
	kvmppc_restore_tm_pr(vcpu);
	preempt_enable();
}

/* Emulate tabort. in guest privileged state */
void kvmppc_emulate_tabort(struct kvm_vcpu *vcpu, int ra_val)
{
	/* We currently only emulate tabort., not the other tabort
	 * variants, since there is no kernel usage of them at
	 * present.
	 */
	unsigned long guest_msr = kvmppc_get_msr(vcpu);

	preempt_disable();
	tm_enable();
	tm_abort(ra_val);

	/* CR0 = 0 | MSR[TS] | 0 */
	vcpu->arch.cr = (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)) |
		(((guest_msr & MSR_TS_MASK) >> (MSR_TS_S_LG - 1))
		 << CR0_SHIFT);

	vcpu->arch.texasr = mfspr(SPRN_TEXASR);
	/* Failure recording depends on the Failure Summary bit;
	 * tabort. is treated as a nop in non-transactional
	 * state.
	 */
	if (!(vcpu->arch.texasr & TEXASR_FS) &&
			MSR_TM_ACTIVE(guest_msr)) {
		vcpu->arch.texasr &= ~(TEXASR_PR | TEXASR_HV);
		if (guest_msr & MSR_PR)
			vcpu->arch.texasr |= TEXASR_PR;

		if (guest_msr & MSR_HV)
			vcpu->arch.texasr |= TEXASR_HV;

		vcpu->arch.tfiar = kvmppc_get_pc(vcpu);
		mtspr(SPRN_TEXASR, vcpu->arch.texasr);
		mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
	}
	tm_disable();
	preempt_enable();
}

#endif

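/*
 * Emulate a privileged or otherwise trapping instruction for a PR guest.
 * Returns one of the EMULATE_* codes; anything that cannot be handled
 * here is handed to the paired-single emulation as a last resort.
 */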
int kvmppc_core_emulate_op_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rt = get_rt(inst);
	int rs = get_rs(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	u32 inst_sc = 0x44000002;

	switch (get_op(inst)) {
	case 0:
		emulated = EMULATE_FAIL;
		if ((kvmppc_get_msr(vcpu) & MSR_LE) &&
		    (inst == swab32(inst_sc))) {
			/*
			 * This is the byte reversed syscall instruction of our
			 * hypercall handler. Early versions of LE Linux didn't
			 * swap the instructions correctly and ended up in
			 * illegal instructions.
			 * Just always fail hypercalls on these broken systems.
			 */
			kvmppc_set_gpr(vcpu, 3, EV_UNIMPLEMENTED);
			kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);
			emulated = EMULATE_DONE;
		}
		break;
Alexander Grafc215c6e2009-10-30 05:47:14 +0000274 case 19:
275 switch (get_xop(inst)) {
276 case OP_19_XOP_RFID:
Simon Guo401a89e2018-05-23 15:01:54 +0800277 case OP_19_XOP_RFI: {
278 unsigned long srr1 = kvmppc_get_srr1(vcpu);
279#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
280 unsigned long cur_msr = kvmppc_get_msr(vcpu);
281
282 /*
283 * add rules to fit in ISA specification regarding TM
284 * state transistion in TM disable/Suspended state,
285 * and target TM state is TM inactive(00) state. (the
286 * change should be suppressed).
287 */
288 if (((cur_msr & MSR_TM) == 0) &&
289 ((srr1 & MSR_TM) == 0) &&
290 MSR_TM_SUSPENDED(cur_msr) &&
291 !MSR_TM_ACTIVE(srr1))
292 srr1 |= MSR_TS_S;
293#endif
Alexander Graf5deb8e72014-04-24 13:46:24 +0200294 kvmppc_set_pc(vcpu, kvmppc_get_srr0(vcpu));
Simon Guo401a89e2018-05-23 15:01:54 +0800295 kvmppc_set_msr(vcpu, srr1);
Alexander Grafc215c6e2009-10-30 05:47:14 +0000296 *advance = 0;
297 break;
Simon Guo401a89e2018-05-23 15:01:54 +0800298 }
Alexander Grafc215c6e2009-10-30 05:47:14 +0000299
300 default:
301 emulated = EMULATE_FAIL;
302 break;
303 }
304 break;
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, kvmppc_get_msr(vcpu));
			break;
		case OP_31_XOP_MTMSRD:
		{
			ulong rs_val = kvmppc_get_gpr(vcpu, rs);
			if (inst & 0x10000) {
				ulong new_msr = kvmppc_get_msr(vcpu);
				new_msr &= ~(MSR_RI | MSR_EE);
				new_msr |= rs_val & (MSR_RI | MSR_EE);
				kvmppc_set_msr_fast(vcpu, new_msr);
			} else
				kvmppc_set_msr(vcpu, rs_val);
			break;
		}
		case OP_31_XOP_MTMSR:
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MFSR:
		{
			int srnum;

			srnum = kvmppc_get_field(inst, 12 + 32, 15 + 32);
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MFSRIN:
		{
			int srnum;

			srnum = (kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf;
			if (vcpu->arch.mmu.mfsrin) {
				u32 sr;
				sr = vcpu->arch.mmu.mfsrin(vcpu, srnum);
				kvmppc_set_gpr(vcpu, rt, sr);
			}
			break;
		}
		case OP_31_XOP_MTSR:
			vcpu->arch.mmu.mtsrin(vcpu,
				(inst >> 16) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_MTSRIN:
			vcpu->arch.mmu.mtsrin(vcpu,
				(kvmppc_get_gpr(vcpu, rb) >> 28) & 0xf,
				kvmppc_get_gpr(vcpu, rs));
			break;
		case OP_31_XOP_TLBIE:
		case OP_31_XOP_TLBIEL:
		{
			bool large = (inst & 0x00200000) ? true : false;
			ulong addr = kvmppc_get_gpr(vcpu, rb);
			vcpu->arch.mmu.tlbie(vcpu, addr, large);
			break;
		}
#ifdef CONFIG_PPC_BOOK3S_64
		case OP_31_XOP_FAKE_SC1:
		{
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

			if ((kvmppc_get_msr(vcpu) & MSR_PR) ||
			    !vcpu->arch.papr_enabled) {
				emulated = EMULATE_FAIL;
				break;
			}

			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE)
				break;

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}

			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			emulated = EMULATE_EXIT_USER;
			break;
		}
#endif
		case OP_31_XOP_EIOIO:
			break;
		case OP_31_XOP_SLBMTE:
			if (!vcpu->arch.mmu.slbmte)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbmte(vcpu,
					kvmppc_get_gpr(vcpu, rs),
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIE:
			if (!vcpu->arch.mmu.slbie)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbie(vcpu,
					kvmppc_get_gpr(vcpu, rb));
			break;
		case OP_31_XOP_SLBIA:
			if (!vcpu->arch.mmu.slbia)
				return EMULATE_FAIL;

			vcpu->arch.mmu.slbia(vcpu);
			break;
		case OP_31_XOP_SLBMFEE:
			if (!vcpu->arch.mmu.slbmfee) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfee(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_SLBMFEV:
			if (!vcpu->arch.mmu.slbmfev) {
				emulated = EMULATE_FAIL;
			} else {
				ulong t, rb_val;

				rb_val = kvmppc_get_gpr(vcpu, rb);
				t = vcpu->arch.mmu.slbmfev(vcpu, rb_val);
				kvmppc_set_gpr(vcpu, rt, t);
			}
			break;
		case OP_31_XOP_DCBA:
			/* Gets treated as NOP */
			break;
		case OP_31_XOP_DCBZ:
		{
			ulong rb_val = kvmppc_get_gpr(vcpu, rb);
			ulong ra_val = 0;
			ulong addr, vaddr;
			u32 zeros[8] = { 0, 0, 0, 0, 0, 0, 0, 0 };
			u32 dsisr;
			int r;

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			addr = (ra_val + rb_val) & ~31ULL;
			if (!(kvmppc_get_msr(vcpu) & MSR_SF))
				addr &= 0xffffffff;
			vaddr = addr;

			r = kvmppc_st(vcpu, &addr, 32, zeros, true);
			if ((r == -ENOENT) || (r == -EPERM)) {
				*advance = 0;
				kvmppc_set_dar(vcpu, vaddr);
				vcpu->arch.fault_dar = vaddr;

				dsisr = DSISR_ISSTORE;
				if (r == -ENOENT)
					dsisr |= DSISR_NOHPTE;
				else if (r == -EPERM)
					dsisr |= DSISR_PROTFAULT;

				kvmppc_set_dsisr(vcpu, dsisr);
				vcpu->arch.fault_dsisr = dsisr;

				kvmppc_book3s_queue_irqprio(vcpu,
					BOOK3S_INTERRUPT_DATA_STORAGE);
			}

			break;
		}
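		/*
		 * The cases below emulate the HTM instructions (tbegin.,
		 * tabort., treclaim., trecheckpoint.) when the host kernel
		 * is built with transactional memory support.
		 */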
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
		case OP_31_XOP_TBEGIN:
		{
			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!(kvmppc_get_msr(vcpu) & MSR_PR)) {
				preempt_disable();
				vcpu->arch.cr = (CR0_TBEGIN_FAILURE |
				  (vcpu->arch.cr & ~(CR0_MASK << CR0_SHIFT)));

				vcpu->arch.texasr = (TEXASR_FS | TEXASR_EXACT |
					(((u64)(TM_CAUSE_EMULATE | TM_CAUSE_PERSISTENT))
						 << TEXASR_FC_LG));

				if ((inst >> 21) & 0x1)
					vcpu->arch.texasr |= TEXASR_ROT;

				if (kvmppc_get_msr(vcpu) & MSR_HV)
					vcpu->arch.texasr |= TEXASR_HV;

				vcpu->arch.tfhar = kvmppc_get_pc(vcpu) + 4;
				vcpu->arch.tfiar = kvmppc_get_pc(vcpu);

				kvmppc_restore_tm_sprs(vcpu);
				preempt_enable();
			} else
				emulated = EMULATE_FAIL;
			break;
		}
		case OP_31_XOP_TABORT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* We only emulate this for a privileged guest: a
			 * problem state guest can run with TM enabled, so we
			 * do not expect to trap here in that case.
			 */
			WARN_ON(guest_msr & MSR_PR);

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);

			kvmppc_emulate_tabort(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRECLAIM:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long ra_val = 0;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupts based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (!MSR_TM_ACTIVE(guest_msr)) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			if (ra)
				ra_val = kvmppc_get_gpr(vcpu, ra);
			kvmppc_emulate_treclaim(vcpu, ra_val);
			break;
		}
		case OP_31_XOP_TRCHKPT:
		{
			ulong guest_msr = kvmppc_get_msr(vcpu);
			unsigned long texasr;

			if (!cpu_has_feature(CPU_FTR_TM))
				break;

			if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
				kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
				emulated = EMULATE_AGAIN;
				break;
			}

			/* generate interrupt based on priorities */
			if (guest_msr & MSR_PR) {
				/* Privileged Instruction type Program Intr */
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
				break;
			}

			tm_enable();
			texasr = mfspr(SPRN_TEXASR);
			tm_disable();

			if (MSR_TM_ACTIVE(guest_msr) ||
				!(texasr & (TEXASR_FS))) {
				/* TM bad thing interrupt */
				kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
				emulated = EMULATE_AGAIN;
				break;
			}

			kvmppc_emulate_trchkpt(vcpu);
			break;
		}
#endif
		default:
			emulated = EMULATE_FAIL;
		}
		break;
	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_emulate_paired_single(run, vcpu);

	return emulated;
}

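/*
 * Decode a 32-bit write to the upper or lower half of a BAT register into
 * the corresponding fields of the shadow kvmppc_bat structure.
 */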
void kvmppc_set_bat(struct kvm_vcpu *vcpu, struct kvmppc_bat *bat, bool upper,
		    u32 val)
{
	if (upper) {
		/* Upper BAT */
		u32 bl = (val >> 2) & 0x7ff;
		bat->bepi_mask = (~bl << 17);
		bat->bepi = val & 0xfffe0000;
		bat->vs = (val & 2) ? 1 : 0;
		bat->vp = (val & 1) ? 1 : 0;
		bat->raw = (bat->raw & 0xffffffff00000000ULL) | val;
	} else {
		/* Lower BAT */
		bat->brpn = val & 0xfffe0000;
		bat->wimg = (val >> 3) & 0xf;
		bat->pp = val & 3;
		bat->raw = (bat->raw & 0x00000000ffffffffULL) | ((u64)val << 32);
	}
}

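/* Map a BAT SPR number onto the vcpu's shadow IBAT/DBAT entry. */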
static struct kvmppc_bat *kvmppc_find_bat(struct kvm_vcpu *vcpu, int sprn)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	struct kvmppc_bat *bat;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
		bat = &vcpu_book3s->ibat[(sprn - SPRN_IBAT0U) / 2];
		break;
	case SPRN_IBAT4U ... SPRN_IBAT7L:
		bat = &vcpu_book3s->ibat[4 + ((sprn - SPRN_IBAT4U) / 2)];
		break;
	case SPRN_DBAT0U ... SPRN_DBAT3L:
		bat = &vcpu_book3s->dbat[(sprn - SPRN_DBAT0U) / 2];
		break;
	case SPRN_DBAT4U ... SPRN_DBAT7L:
		bat = &vcpu_book3s->dbat[4 + ((sprn - SPRN_DBAT4U) / 2)];
		break;
	default:
		BUG();
	}

	return bat;
}

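/*
 * Emulate mtspr for a PR guest. Known SPRs are stored into the shadow
 * vcpu state (or silently ignored); unknown or disallowed accesses are
 * logged and, where the architecture requires it, turned into a program
 * interrupt.
 */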
int kvmppc_core_emulate_mtspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		to_book3s(vcpu)->sdr1 = spr_val;
		break;
	case SPRN_DSISR:
		kvmppc_set_dsisr(vcpu, spr_val);
		break;
	case SPRN_DAR:
		kvmppc_set_dar(vcpu, spr_val);
		break;
	case SPRN_HIOR:
		to_book3s(vcpu)->hior = spr_val;
		break;
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		kvmppc_set_bat(vcpu, bat, !(sprn % 2), (u32)spr_val);
		/* BAT writes happen so rarely that we're ok to flush
		 * everything here */
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
		break;
	}
	case SPRN_HID0:
		to_book3s(vcpu)->hid[0] = spr_val;
		break;
	case SPRN_HID1:
		to_book3s(vcpu)->hid[1] = spr_val;
		break;
	case SPRN_HID2:
		to_book3s(vcpu)->hid[2] = spr_val;
		break;
	case SPRN_HID2_GEKKO:
		to_book3s(vcpu)->hid[2] = spr_val;
		/* HID2.PSE controls paired single on gekko */
		switch (vcpu->arch.pvr) {
		case 0x00080200:	/* lonestar 2.0 */
		case 0x00088202:	/* lonestar 2.2 */
		case 0x70000100:	/* gekko 1.0 */
		case 0x00080100:	/* gekko 2.0 */
		case 0x00083203:	/* gekko 2.3a */
		case 0x00083213:	/* gekko 2.3b */
		case 0x00083204:	/* gekko 2.4 */
		case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
		case 0x00087200:	/* broadway */
			if (vcpu->arch.hflags & BOOK3S_HFLAG_NATIVE_PS) {
				/* Native paired singles */
			} else if (spr_val & (1 << 29)) { /* HID2.PSE */
				vcpu->arch.hflags |= BOOK3S_HFLAG_PAIRED_SINGLE;
				kvmppc_giveup_ext(vcpu, MSR_FP);
			} else {
				vcpu->arch.hflags &= ~BOOK3S_HFLAG_PAIRED_SINGLE;
			}
			break;
		}
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		to_book3s(vcpu)->hid[4] = spr_val;
		break;
	case SPRN_HID5:
		to_book3s(vcpu)->hid[5] = spr_val;
		/* guest HID5 set can change is_dcbz32 */
		if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
		    (mfmsr() & MSR_HV))
			vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		to_book3s(vcpu)->gqr[sprn - SPRN_GQR0] = spr_val;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		kvmppc_set_fscr(vcpu, spr_val);
		break;
	case SPRN_BESCR:
		vcpu->arch.bescr = spr_val;
		break;
	case SPRN_EBBHR:
		vcpu->arch.ebbhr = spr_val;
		break;
	case SPRN_EBBRR:
		vcpu->arch.ebbrr = spr_val;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		if (MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)) &&
			!((MSR_TM_SUSPENDED(kvmppc_get_msr(vcpu))) &&
					(sprn == SPRN_TFHAR))) {
			/* mtspr() of TM regs is only legal in
			 * non-transactional state, with the exception of
			 * TFHAR, which may also be written in suspended
			 * state.
			 */
			kvmppc_core_queue_program(vcpu, SRR1_PROGTM);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			mtspr(SPRN_TFHAR, spr_val);
		else if (sprn == SPRN_TEXASR)
			mtspr(SPRN_TEXASR, spr_val);
		else
			mtspr(SPRN_TFIAR, spr_val);
		tm_disable();

		break;
#endif
#endif
	case SPRN_ICTC:
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_DSCR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
#endif
		break;
unprivileged:
	default:
		pr_info_ratelimited("KVM: invalid SPR write: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}
		break;
	}

	return emulated;
}

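/*
 * Emulate mfspr for a PR guest. Values come from the shadow vcpu state
 * where one is kept; unhandled SPRs read as zero, and unknown ones are
 * logged and may raise a program interrupt.
 */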
int kvmppc_core_emulate_mfspr_pr(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_IBAT0U ... SPRN_IBAT3L:
	case SPRN_IBAT4U ... SPRN_IBAT7L:
	case SPRN_DBAT0U ... SPRN_DBAT3L:
	case SPRN_DBAT4U ... SPRN_DBAT7L:
	{
		struct kvmppc_bat *bat = kvmppc_find_bat(vcpu, sprn);

		if (sprn % 2)
			*spr_val = bat->raw >> 32;
		else
			*spr_val = bat->raw;

		break;
	}
	case SPRN_SDR1:
		if (!spr_allowed(vcpu, PRIV_HYPER))
			goto unprivileged;
		*spr_val = to_book3s(vcpu)->sdr1;
		break;
	case SPRN_DSISR:
		*spr_val = kvmppc_get_dsisr(vcpu);
		break;
	case SPRN_DAR:
		*spr_val = kvmppc_get_dar(vcpu);
		break;
	case SPRN_HIOR:
		*spr_val = to_book3s(vcpu)->hior;
		break;
	case SPRN_HID0:
		*spr_val = to_book3s(vcpu)->hid[0];
		break;
	case SPRN_HID1:
		*spr_val = to_book3s(vcpu)->hid[1];
		break;
	case SPRN_HID2:
	case SPRN_HID2_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[2];
		break;
	case SPRN_HID4:
	case SPRN_HID4_GEKKO:
		*spr_val = to_book3s(vcpu)->hid[4];
		break;
	case SPRN_HID5:
		*spr_val = to_book3s(vcpu)->hid[5];
		break;
	case SPRN_CFAR:
	case SPRN_DSCR:
		*spr_val = 0;
		break;
	case SPRN_PURR:
		/*
		 * On exit we would have updated purr
		 */
		*spr_val = vcpu->arch.purr;
		break;
	case SPRN_SPURR:
		/*
		 * On exit we would have updated spurr
		 */
		*spr_val = vcpu->arch.spurr;
		break;
	case SPRN_VTB:
		*spr_val = to_book3s(vcpu)->vtb;
		break;
	case SPRN_IC:
		*spr_val = vcpu->arch.ic;
		break;
	case SPRN_GQR0:
	case SPRN_GQR1:
	case SPRN_GQR2:
	case SPRN_GQR3:
	case SPRN_GQR4:
	case SPRN_GQR5:
	case SPRN_GQR6:
	case SPRN_GQR7:
		*spr_val = to_book3s(vcpu)->gqr[sprn - SPRN_GQR0];
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_FSCR:
		*spr_val = vcpu->arch.fscr;
		break;
	case SPRN_BESCR:
		*spr_val = vcpu->arch.bescr;
		break;
	case SPRN_EBBHR:
		*spr_val = vcpu->arch.ebbhr;
		break;
	case SPRN_EBBRR:
		*spr_val = vcpu->arch.ebbrr;
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case SPRN_TFHAR:
	case SPRN_TEXASR:
	case SPRN_TFIAR:
		if (!cpu_has_feature(CPU_FTR_TM))
			break;

		if (!(kvmppc_get_msr(vcpu) & MSR_TM)) {
			kvmppc_trigger_fac_interrupt(vcpu, FSCR_TM_LG);
			emulated = EMULATE_AGAIN;
			break;
		}

		tm_enable();
		if (sprn == SPRN_TFHAR)
			*spr_val = mfspr(SPRN_TFHAR);
		else if (sprn == SPRN_TEXASR)
			*spr_val = mfspr(SPRN_TEXASR);
		else if (sprn == SPRN_TFIAR)
			*spr_val = mfspr(SPRN_TFIAR);
		tm_disable();
		break;
#endif
#endif
	case SPRN_THRM1:
	case SPRN_THRM2:
	case SPRN_THRM3:
	case SPRN_CTRLF:
	case SPRN_CTRLT:
	case SPRN_L2CR:
	case SPRN_MMCR0_GEKKO:
	case SPRN_MMCR1_GEKKO:
	case SPRN_PMC1_GEKKO:
	case SPRN_PMC2_GEKKO:
	case SPRN_PMC3_GEKKO:
	case SPRN_PMC4_GEKKO:
	case SPRN_WPAR_GEKKO:
	case SPRN_MSSSR0:
	case SPRN_DABR:
#ifdef CONFIG_PPC_BOOK3S_64
	case SPRN_MMCRS:
	case SPRN_MMCRA:
	case SPRN_MMCR0:
	case SPRN_MMCR1:
	case SPRN_MMCR2:
	case SPRN_UMMCR2:
	case SPRN_TIR:
#endif
		*spr_val = 0;
		break;
	default:
unprivileged:
		pr_info_ratelimited("KVM: invalid SPR read: %d\n", sprn);
		if (sprn & 0x10) {
			if (kvmppc_get_msr(vcpu) & MSR_PR) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGPRIV);
				emulated = EMULATE_AGAIN;
			}
		} else {
			if ((kvmppc_get_msr(vcpu) & MSR_PR) || sprn == 0 ||
			    sprn == 4 || sprn == 5 || sprn == 6) {
				kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
				emulated = EMULATE_AGAIN;
			}
		}

		break;
	}

	return emulated;
}

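/* Build the DSISR value that an alignment interrupt should report for inst. */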
u32 kvmppc_alignment_dsisr(struct kvm_vcpu *vcpu, unsigned int inst)
{
	return make_dsisr(inst);
}

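/*
 * Compute the DAR to report for an alignment interrupt. On Book3S_64 the
 * hardware-provided fault_dar is already valid; on 32-bit we recompute the
 * effective address from the instruction's RA/RB/displacement fields.
 */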
ulong kvmppc_alignment_dar(struct kvm_vcpu *vcpu, unsigned int inst)
{
#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Linux's fix_alignment() assumes that DAR is valid, so we can too.
	 */
	return vcpu->arch.fault_dar;
#else
	ulong dar = 0;
	ulong ra = get_ra(inst);
	ulong rb = get_rb(inst);

	switch (get_op(inst)) {
	case OP_LFS:
	case OP_LFD:
	case OP_STFD:
	case OP_STFS:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += (s32)((s16)inst);
		break;
	case 31:
		if (ra)
			dar = kvmppc_get_gpr(vcpu, ra);
		dar += kvmppc_get_gpr(vcpu, rb);
		break;
	default:
		printk(KERN_INFO "KVM: Unaligned instruction 0x%x\n", inst);
		break;
	}

	return dar;
#endif
}