/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#include <linux/jiffies.h>
#include <linux/hrtimer.h>
#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm_host.h>
#include <linux/clockchips.h>

#include <asm/reg.h>
#include <asm/time.h>
#include <asm/byteorder.h>
#include <asm/kvm_ppc.h>
#include <asm/disassemble.h>
#include <asm/ppc-opcode.h>
#include "timing.h"
#include "trace.h"

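/*
 * If the guest attempts an FP or VSX load/store while the corresponding
 * facility is disabled in the guest MSR, the architected behaviour is to
 * take a facility-unavailable interrupt rather than perform the access.
 * These helpers queue that interrupt and tell the caller to stop emulating.
 */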
#ifdef CONFIG_PPC_FPU
static bool kvmppc_check_fp_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_FP)) {
		kvmppc_core_queue_fpunavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_PPC_FPU */

#ifdef CONFIG_VSX
static bool kvmppc_check_vsx_disabled(struct kvm_vcpu *vcpu)
{
	if (!(kvmppc_get_msr(vcpu) & MSR_VSX)) {
		kvmppc_core_queue_vsx_unavail(vcpu);
		return true;
	}

	return false;
}
#endif /* CONFIG_VSX */

/* XXX still to do (lhax, lha and lhau are handled below):
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lmw
 * stmw
 */
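/*
 * kvmppc_emulate_loadstore() - emulate a guest load or store that faulted
 * on an emulated MMIO region.  Decodes the instruction and hands the
 * access to the kvmppc_handle_load()/kvmppc_handle_store() MMIO machinery.
 * Returns the resulting emulation state, e.g. EMULATE_DONE, EMULATE_DO_MMIO
 * (the access must be completed by userspace) or EMULATE_FAIL.
 */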
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	/*
	 * if mmio_vsx_tx_sx_enabled == 0, copy data between
	 * VSR[0..31] and memory
	 * if mmio_vsx_tx_sx_enabled == 1, copy data between
	 * VSR[32..63] and memory
	 */
	vcpu->arch.mmio_vsx_tx_sx_enabled = get_tx_or_sx(inst);
	vcpu->arch.mmio_vsx_copy_nums = 0;
	vcpu->arch.mmio_vsx_offset = 0;
	vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_NONE;
	vcpu->arch.mmio_sp64_extend = 0;
	vcpu->arch.mmio_sign_extend = 0;

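	/*
	 * The mmio_* fields above live in vcpu->arch across the exit to
	 * userspace and back, so they must be reset before each newly
	 * decoded instruction.  Dispatch below is on the primary opcode;
	 * opcode 31 holds the X-form (indexed) loads and stores.
	 */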
	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

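		/*
		 * The "u" (update) forms also write the computed effective
		 * address back into rA; vaddr_accessed was recorded by the
		 * fault path, so it is reused here as that EA.
		 */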
		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/*
			 * Do nothing.  The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled
			 * dcache coherence.
			 */
			break;

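		/*
		 * The byte-reversed forms pass 0 for the final
		 * "is_default_endian" argument, so the access is performed
		 * with the opposite of the guest's current endianness.
		 */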
		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 0);
			break;

		case OP_31_XOP_LDX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			break;

		case OP_31_XOP_LDUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LWAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 4, 1);
			break;

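		/*
		 * For the FP loads below, KVM_MMIO_REG_FPR|rt directs the
		 * MMIO completion code to write into the FP register file.
		 * mmio_sp64_extend = 1 requests conversion between the
		 * 4-byte single-precision image in memory and the 64-bit
		 * register format.
		 */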
#ifdef CONFIG_PPC_FPU
		case OP_31_XOP_LFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			break;

		case OP_31_XOP_LFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 8, 1);
			break;

		case OP_31_XOP_LFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_load(run, vcpu,
					KVM_MMIO_REG_FPR|rt, 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFSX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STFSUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFDX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 8, 1);
			break;

		case OP_31_XOP_STFDUX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 8, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STFIWX:
			if (kvmppc_check_fp_disabled(vcpu))
				return EMULATE_DONE;
			emulated = kvmppc_handle_store(run, vcpu,
					VCPU_FPR(vcpu, rs), 4, 1);
			break;
#endif

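		/*
		 * For the VSX cases, mmio_vsx_copy_nums and
		 * mmio_vsx_copy_type describe how a 128-bit VSX register is
		 * assembled from (or split into) individual MMIO copies,
		 * since each MMIO transaction moves at most 8 bytes.
		 */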
#ifdef CONFIG_VSX
		case OP_31_XOP_LXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXSIWAX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 1);
			break;

		case OP_31_XOP_LXSIWZX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVD2X:
			/*
			 * The load/store process for these is:
			 *
			 * Step 1: exit from the guest via the page fault
			 * handler, at which point KVM saves the guest VSRs.
			 * See guest_exit_cont->store_fp_state->SAVE_32VSRS
			 * for reference.
			 *
			 * Step 2: copy the data between memory and the vcpu.
			 * Note: for LXVD2X/STXVD2X/LXVW4X/STXVW4X we use
			 * 2 copies of 8 bytes or 4 copies of 4 bytes to
			 * simulate one 16-byte copy.  There is also an
			 * endianness issue here; note the memory layout and
			 * see the LXVD2X_ROT/STXVD2X_ROT macros for
			 * reference.  If the host is little-endian, KVM
			 * applies XXSWAPD for LXVD2X_ROT/STXVD2X_ROT, so
			 * the two doublewords in memory are swapped.
			 *
			 * Step 3: on return to the guest, KVM restores the
			 * registers.  See
			 * kvmppc_hv_entry->load_fp_state->REST_32VSRS
			 * for reference.
			 */
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_LXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 4, 1, 0);
			break;

		case OP_31_XOP_LXVDSX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type =
				KVMPPC_VSX_COPY_DWORD_LOAD_DUMP;
			emulated = kvmppc_handle_vsx_load(run, vcpu,
					KVM_MMIO_REG_VSX|rt, 8, 1, 0);
			break;

		case OP_31_XOP_STXSDX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 8, 1);
			break;

		case OP_31_XOP_STXSSPX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			vcpu->arch.mmio_sp64_extend = 1;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;

		case OP_31_XOP_STXSIWX:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_offset = 1;
			vcpu->arch.mmio_vsx_copy_nums = 1;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;

		case OP_31_XOP_STXVD2X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 2;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_DWORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 8, 1);
			break;

		case OP_31_XOP_STXVW4X:
			if (kvmppc_check_vsx_disabled(vcpu))
				return EMULATE_DONE;
			vcpu->arch.mmio_vsx_copy_nums = 4;
			vcpu->arch.mmio_vsx_copy_type = KVMPPC_VSX_COPY_WORD;
			emulated = kvmppc_handle_vsx_store(run, vcpu,
					rs, 4, 1);
			break;
#endif /* CONFIG_VSX */
		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

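	/*
	 * D-form FP stores: VCPU_FPR(vcpu, rs) is the source value, and the
	 * single-precision forms set mmio_sp64_extend so the 64-bit register
	 * image is converted to a 4-byte single-precision image on the way
	 * out.
	 */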
#ifdef CONFIG_PPC_FPU
	case OP_STFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
		break;

	case OP_STFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 8, 1);
		break;

	case OP_STFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_store(run, vcpu,
				VCPU_FPR(vcpu, rs), 8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif

	/* TBD: Add support for other 64 bit load variants like ldu etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		break;

	/*
	 * TBD: Add support for other 64 bit store variants like stdu,
	 * stdux, stdx etc.
	 */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

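	/*
	 * D-form FP loads, mirroring the X-form LFSX/LFDX handling above:
	 * the target is KVM_MMIO_REG_FPR|rt, and the single-precision forms
	 * set mmio_sp64_extend for the 4-byte to 64-bit conversion.
	 */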
#ifdef CONFIG_PPC_FPU
	case OP_LFS:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
		break;

	case OP_LFSU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		vcpu->arch.mmio_sp64_extend = 1;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LFD:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
		break;

	case OP_LFDU:
		if (kvmppc_check_fp_disabled(vcpu))
			return EMULATE_DONE;
		emulated = kvmppc_handle_load(run, vcpu,
				KVM_MMIO_REG_FPR|rt, 8, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;
#endif

	default:
		emulated = EMULATE_FAIL;
		break;
	}

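	/*
	 * If the instruction couldn't be emulated, don't advance the PC;
	 * reflect a program interrupt into the guest instead.
	 */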
	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}