1/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Instruction/Exception emulation
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/ktime.h>
15#include <linux/kvm_host.h>
16#include <linux/module.h>
17#include <linux/vmalloc.h>
18#include <linux/fs.h>
19#include <linux/bootmem.h>
20#include <linux/random.h>
21#include <asm/page.h>
22#include <asm/cacheflush.h>
23#include <asm/cpu-info.h>
24#include <asm/mmu_context.h>
25#include <asm/tlbflush.h>
26#include <asm/inst.h>
27
28#undef CONFIG_MIPS_MT
29#include <asm/r4kcache.h>
30#define CONFIG_MIPS_MT
31
32#include "kvm_mips_opcode.h"
33#include "kvm_mips_int.h"
34#include "kvm_mips_comm.h"
35
36#include "trace.h"
37
38/*
39 * Compute the return address, emulating the branch if required.
40 * This function should only be called for an instruction in a branch delay slot.
41 */
42unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
43 unsigned long instpc)
44{
45 unsigned int dspcontrol;
46 union mips_instruction insn;
47 struct kvm_vcpu_arch *arch = &vcpu->arch;
48 long epc = instpc;
49 long nextpc = KVM_INVALID_INST;
50
51 if (epc & 3)
52 goto unaligned;
53
54 /*
55 * Read the instruction
56 */
57 insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
58
59 if (insn.word == KVM_INVALID_INST)
60 return KVM_INVALID_INST;
61
62 switch (insn.i_format.opcode) {
63 /*
64 * jr and jalr are in r_format format.
65 */
66 case spec_op:
67 switch (insn.r_format.func) {
68 case jalr_op:
69 arch->gprs[insn.r_format.rd] = epc + 8;
70 /* Fall through */
71 case jr_op:
72 nextpc = arch->gprs[insn.r_format.rs];
73 break;
74 }
75 break;
76
77 /*
78 * This group contains:
79 * bltz_op, bgez_op, bltzl_op, bgezl_op,
80 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
81 */
82 case bcond_op:
83 switch (insn.i_format.rt) {
84 case bltz_op:
85 case bltzl_op:
86 if ((long)arch->gprs[insn.i_format.rs] < 0)
87 epc = epc + 4 + (insn.i_format.simmediate << 2);
88 else
89 epc += 8;
90 nextpc = epc;
91 break;
92
93 case bgez_op:
94 case bgezl_op:
95 if ((long)arch->gprs[insn.i_format.rs] >= 0)
96 epc = epc + 4 + (insn.i_format.simmediate << 2);
97 else
98 epc += 8;
99 nextpc = epc;
100 break;
101
102 case bltzal_op:
103 case bltzall_op:
104 arch->gprs[31] = epc + 8;
105 if ((long)arch->gprs[insn.i_format.rs] < 0)
106 epc = epc + 4 + (insn.i_format.simmediate << 2);
107 else
108 epc += 8;
109 nextpc = epc;
110 break;
111
112 case bgezal_op:
113 case bgezall_op:
114 arch->gprs[31] = epc + 8;
115 if ((long)arch->gprs[insn.i_format.rs] >= 0)
116 epc = epc + 4 + (insn.i_format.simmediate << 2);
117 else
118 epc += 8;
119 nextpc = epc;
120 break;
121 case bposge32_op:
122 if (!cpu_has_dsp)
123 goto sigill;
124
125 dspcontrol = rddsp(0x01);
126
127 if (dspcontrol >= 32) {
128 epc = epc + 4 + (insn.i_format.simmediate << 2);
129 } else
130 epc += 8;
131 nextpc = epc;
132 break;
133 }
134 break;
135
136 /*
137 * These are unconditional and in j_format.
138 */
139 case jal_op:
140 arch->gprs[31] = instpc + 8;
141 case j_op:
142 epc += 4;
143 epc >>= 28;
144 epc <<= 28;
145 epc |= (insn.j_format.target << 2);
146 nextpc = epc;
147 break;
148
149 /*
150 * These are conditional and in i_format.
151 */
152 case beq_op:
153 case beql_op:
154 if (arch->gprs[insn.i_format.rs] ==
155 arch->gprs[insn.i_format.rt])
156 epc = epc + 4 + (insn.i_format.simmediate << 2);
157 else
158 epc += 8;
159 nextpc = epc;
160 break;
161
162 case bne_op:
163 case bnel_op:
164 if (arch->gprs[insn.i_format.rs] !=
165 arch->gprs[insn.i_format.rt])
166 epc = epc + 4 + (insn.i_format.simmediate << 2);
167 else
168 epc += 8;
169 nextpc = epc;
170 break;
171
172 case blez_op: /* not really i_format */
173 case blezl_op:
174 /* rt field assumed to be zero */
175 if ((long)arch->gprs[insn.i_format.rs] <= 0)
176 epc = epc + 4 + (insn.i_format.simmediate << 2);
177 else
178 epc += 8;
179 nextpc = epc;
180 break;
181
182 case bgtz_op:
183 case bgtzl_op:
184 /* rt field assumed to be zero */
185 if ((long)arch->gprs[insn.i_format.rs] > 0)
186 epc = epc + 4 + (insn.i_format.simmediate << 2);
187 else
188 epc += 8;
189 nextpc = epc;
190 break;
191
192 /*
193 * And now the FPA/cp1 branch instructions.
194 */
195 case cop1_op:
196 printk("%s: unsupported cop1_op\n", __func__);
197 break;
198 }
199
200 return nextpc;
201
202unaligned:
203 printk("%s: unaligned epc\n", __func__);
204 return nextpc;
205
206sigill:
207 printk("%s: DSP branch but not DSP ASE\n", __func__);
208 return nextpc;
209}
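
/*
 * Worked example (added for illustration, not part of the original comments):
 * if the instruction in the delay slot of a beq whose 16-bit offset field is 4
 * faults at @instpc, a taken branch resumes at instpc + 4 + (4 << 2) =
 * instpc + 20, while a not-taken branch resumes at instpc + 8, i.e. just past
 * the delay slot.
 */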
210
211enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
212{
213 unsigned long branch_pc;
214 enum emulation_result er = EMULATE_DONE;
215
216 if (cause & CAUSEF_BD) {
217 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
218 if (branch_pc == KVM_INVALID_INST) {
219 er = EMULATE_FAIL;
220 } else {
221 vcpu->arch.pc = branch_pc;
222 kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
223 }
224 } else
225 vcpu->arch.pc += 4;
226
227 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
228
229 return er;
230}
231
232/**
233 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
234 * @vcpu:	Virtual CPU.
235 *
236 * Returns:	1 if the CP0_Count timer is disabled by the guest CP0_Cause.DC
237 *		bit.
238 *		0 otherwise (in which case CP0_Count timer is running).
239 */
240static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
241{
242	struct mips_coproc *cop0 = vcpu->arch.cop0;
243	return kvm_read_c0_guest_cause(cop0) & CAUSEF_DC;
244}
245
246/**
247 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
248 *
249 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
250 *
251 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
252 */
253static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
254{
255 s64 now_ns, periods;
256 u64 delta;
257
258 now_ns = ktime_to_ns(now);
259 delta = now_ns + vcpu->arch.count_dyn_bias;
260
261 if (delta >= vcpu->arch.count_period) {
262 /* If delta is out of safe range the bias needs adjusting */
263 periods = div64_s64(now_ns, vcpu->arch.count_period);
264 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
265 /* Recalculate delta with new bias */
266 delta = now_ns + vcpu->arch.count_dyn_bias;
267	}
268
269	/*
270 * We've ensured that:
271 * delta < count_period
272 *
273 * Therefore the intermediate delta*count_hz will never overflow since
274 * at the boundary condition:
275 * delta = count_period
276 * delta = NSEC_PER_SEC * 2^32 / count_hz
277 * delta * count_hz = NSEC_PER_SEC * 2^32
278 */
279 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
280}
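
/*
 * Illustrative numbers (assuming the 100 MHz count_hz set up by
 * kvm_mips_init_count()): a delta of 1,000,000 ns scales to
 * 1,000,000 * 10^8 / NSEC_PER_SEC = 100,000 count ticks, and at the boundary
 * delta == count_period the product delta * count_hz is NSEC_PER_SEC * 2^32
 * (about 4.3e18), which still fits comfortably in 64 bits.
 */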
281
282/**
283 * kvm_mips_read_count_running() - Read the current count value as if running.
284 * @vcpu: Virtual CPU.
285 * @now: Kernel time to read CP0_Count at.
286 *
287 * Returns the current guest CP0_Count register at time @now, handling any
288 * timer interrupt which has become due but hasn't been handled yet.
289 *
290 * Returns: The current value of the guest CP0_Count register.
291 */
292static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
293{
294 ktime_t expires;
295 int running;
296
297 /* Is the hrtimer pending? */
298 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
299 if (ktime_compare(now, expires) >= 0) {
300 /*
301 * Cancel it while we handle it so there's no chance of
302 * interference with the timeout handler.
303 */
304 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
305
306 /* Nothing should be waiting on the timeout */
307 kvm_mips_callbacks->queue_timer_int(vcpu);
308
309 /*
310 * Restart the timer if it was running based on the expiry time
311 * we read, so that we don't push it back 2 periods.
312 */
313 if (running) {
314 expires = ktime_add_ns(expires,
315 vcpu->arch.count_period);
316 hrtimer_start(&vcpu->arch.comparecount_timer, expires,
317 HRTIMER_MODE_ABS);
318 }
319 }
320
321 /* Return the biased and scaled guest CP0_Count */
322 return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
323}
324
325/**
326 * kvm_mips_read_count() - Read the current count value.
327 * @vcpu: Virtual CPU.
328 *
329 * Read the current guest CP0_Count value, taking into account whether the timer
330 * is stopped.
331 *
332 * Returns: The current guest CP0_Count value.
333 */
334uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
335{
336 struct mips_coproc *cop0 = vcpu->arch.cop0;
337
338 /* If count disabled just read static copy of count */
339 if (kvm_mips_count_disabled(vcpu))
340 return kvm_read_c0_guest_count(cop0);
341
342 return kvm_mips_read_count_running(vcpu, ktime_get());
343}
344
345/**
346 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
347 * @vcpu: Virtual CPU.
348 * @count: Output pointer for CP0_Count value at point of freeze.
349 *
350 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
351 * at the point it was frozen. It is guaranteed that any pending interrupts at
352 * the point it was frozen are handled, and none after that point.
353 *
354 * This is useful where the time/CP0_Count is needed in the calculation of the
355 * new parameters.
356 *
357 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
358 *
359 * Returns: The ktime at the point of freeze.
360 */
361static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
362 uint32_t *count)
363{
364 ktime_t now;
365
366 /* stop hrtimer before finding time */
367 hrtimer_cancel(&vcpu->arch.comparecount_timer);
368 now = ktime_get();
369
370 /* find count at this point and handle pending hrtimer */
371 *count = kvm_mips_read_count_running(vcpu, now);
372
373 return now;
374}
375
376
377/**
378 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
379 * @vcpu: Virtual CPU.
380 * @now: ktime at point of resume.
381 * @count: CP0_Count at point of resume.
382 *
383 * Resumes the timer and updates the timer expiry based on @now and @count.
384 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
385 * parameters need to be changed.
386 *
387 * It is guaranteed that a timer interrupt immediately after resume will be
388 * handled, but not if CP0_Compare is exactly at @count. That case is already
389 * handled by kvm_mips_freeze_hrtimer().
390 *
391 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
392 */
393static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
394 ktime_t now, uint32_t count)
395{
396 struct mips_coproc *cop0 = vcpu->arch.cop0;
397 uint32_t compare;
398 u64 delta;
399 ktime_t expire;
400
401 /* Calculate timeout (wrap 0 to 2^32) */
402 compare = kvm_read_c0_guest_compare(cop0);
403 delta = (u64)(uint32_t)(compare - count - 1) + 1;
404 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
405 expire = ktime_add_ns(now, delta);
406
407 /* Update hrtimer to use new timeout */
408 hrtimer_cancel(&vcpu->arch.comparecount_timer);
409 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
410}
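
/*
 * Note on the wrap arithmetic above (added for illustration): the expression
 * (u32)(compare - count - 1) + 1 maps a CP0_Compare equal to the current
 * count to a full 2^32-tick period rather than zero. With compare == count
 * the u32 term is 0xffffffff, so delta becomes 2^32 ticks (~42.9s at
 * 100 MHz), while compare == count + 1 gives a delta of a single tick.
 */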
411
412/**
413 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
414 * @vcpu: Virtual CPU.
415 *
416 * Recalculates and updates the expiry time of the hrtimer. This can be used
417 * after timer parameters have been altered in ways that do not depend on the
418 * time the change occurs (otherwise kvm_mips_freeze_hrtimer() and
419 * kvm_mips_resume_hrtimer() are used directly).
420 *
421 * It is guaranteed that no timer interrupts will be lost in the process.
422 *
423 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
424 */
425static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
426{
427 ktime_t now;
428 uint32_t count;
429
430 /*
431	 * kvm_mips_freeze_hrtimer() takes care of timer interrupts <= count, and
432	 * kvm_mips_resume_hrtimer() takes care of timer interrupts > count.
433 */
434 now = kvm_mips_freeze_hrtimer(vcpu, &count);
435 kvm_mips_resume_hrtimer(vcpu, now, count);
436}
437
438/**
439 * kvm_mips_write_count() - Modify the count and update timer.
440 * @vcpu: Virtual CPU.
441 * @count: Guest CP0_Count value to set.
442 *
443 * Sets the CP0_Count value and updates the timer accordingly.
444 */
445void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
446{
447 struct mips_coproc *cop0 = vcpu->arch.cop0;
448 ktime_t now;
449
450 /* Calculate bias */
451 now = ktime_get();
452 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
453
454 if (kvm_mips_count_disabled(vcpu))
455 /* The timer's disabled, adjust the static count */
456 kvm_write_c0_guest_count(cop0, count);
457 else
458 /* Update timeout */
459 kvm_mips_resume_hrtimer(vcpu, now, count);
460}
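
/*
 * For illustration (not part of the original comments): count_bias is simply
 * whatever must be added to the scaled ktime so that a read at @now returns
 * exactly @count. If kvm_mips_ktime_to_count(vcpu, now) is 0x1000 and the
 * guest writes 0x2000, count_bias becomes 0x1000 and subsequent reads return
 * 0x1000 + <scaled ktime>, advancing at count_hz from the written value.
 */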
461
462/**
463 * kvm_mips_init_count() - Initialise timer.
464 * @vcpu: Virtual CPU.
465 *
466 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
467 * it going if it's enabled.
468 */
469void kvm_mips_init_count(struct kvm_vcpu *vcpu)
470{
471 /* 100 MHz */
472 vcpu->arch.count_hz = 100*1000*1000;
473 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
474 vcpu->arch.count_hz);
475 vcpu->arch.count_dyn_bias = 0;
476
477 /* Starting at 0 */
478 kvm_mips_write_count(vcpu, 0);
479}
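
/*
 * Sanity check on the numbers above (illustrative): at count_hz = 100 MHz,
 * count_period = (NSEC_PER_SEC << 32) / count_hz = 42,949,672,960 ns, i.e.
 * the roughly 42.9 seconds it takes the 32-bit CP0_Count to wrap once.
 */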
480
481/**
482 * kvm_mips_write_compare() - Modify compare and update timer.
483 * @vcpu: Virtual CPU.
484 * @compare: New CP0_Compare value.
485 *
486 * Update CP0_Compare to a new value and update the timeout.
487 */
488void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
489{
490 struct mips_coproc *cop0 = vcpu->arch.cop0;
491
492 /* if unchanged, must just be an ack */
493 if (kvm_read_c0_guest_compare(cop0) == compare)
494 return;
495
496 /* Update compare */
497 kvm_write_c0_guest_compare(cop0, compare);
498
499 /* Update timeout if count enabled */
500 if (!kvm_mips_count_disabled(vcpu))
501 kvm_mips_update_hrtimer(vcpu);
502}
503
504/**
505 * kvm_mips_count_disable() - Disable count.
506 * @vcpu: Virtual CPU.
507 *
508 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
509 * time will be handled but not after.
510 *
511 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC has been
512 * set (count disabled).
513 *
514 * Returns: The time that the timer was stopped.
515 */
516static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
517{
518 struct mips_coproc *cop0 = vcpu->arch.cop0;
519 uint32_t count;
520 ktime_t now;
521
522 /* Stop hrtimer */
523 hrtimer_cancel(&vcpu->arch.comparecount_timer);
524
525 /* Set the static count from the dynamic count, handling pending TI */
526 now = ktime_get();
527 count = kvm_mips_read_count_running(vcpu, now);
528 kvm_write_c0_guest_count(cop0, count);
529
530 return now;
531}
532
533/**
534 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
535 * @vcpu: Virtual CPU.
536 *
537 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
538 * before the final stop time will be handled, but not after.
539 *
540 * Assumes CP0_Cause.DC is clear (count enabled).
541 */
542void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
543{
544 struct mips_coproc *cop0 = vcpu->arch.cop0;
545
546 kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
547 kvm_mips_count_disable(vcpu);
548}
549
550/**
551 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
552 * @vcpu: Virtual CPU.
553 *
554 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
555 * the start time will be handled, potentially before even returning, so the
556 * caller should be careful with ordering of CP0_Cause modifications so as not
557 * to lose it.
558 *
559 * Assumes CP0_Cause.DC is set (count disabled).
560 */
561void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
562{
563 struct mips_coproc *cop0 = vcpu->arch.cop0;
564 uint32_t count;
565
566 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
567
568 /*
569 * Set the dynamic count to match the static count.
570 * This starts the hrtimer.
571 */
572 count = kvm_read_c0_guest_count(cop0);
573 kvm_mips_write_count(vcpu, count);
574}
575
576/**
577 * kvm_mips_count_timeout() - Push timer forward on timeout.
578 * @vcpu: Virtual CPU.
579 *
580 * Handle an hrtimer event by pushing the hrtimer forward a period.
581 *
582 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
583 */
584enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
585{
586 /* Add the Count period to the current expiry time */
587 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
588 vcpu->arch.count_period);
589 return HRTIMER_RESTART;
590}
591
592enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
593{
594 struct mips_coproc *cop0 = vcpu->arch.cop0;
595 enum emulation_result er = EMULATE_DONE;
596
597 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
598 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
599 kvm_read_c0_guest_epc(cop0));
600 kvm_clear_c0_guest_status(cop0, ST0_EXL);
601 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
602
603 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
604 kvm_clear_c0_guest_status(cop0, ST0_ERL);
605 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
606 } else {
607 printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
608 vcpu->arch.pc);
609 er = EMULATE_FAIL;
610 }
611
612 return er;
613}
614
615enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
616{
617 enum emulation_result er = EMULATE_DONE;
618
619 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
620 vcpu->arch.pending_exceptions);
621
622 ++vcpu->stat.wait_exits;
623 trace_kvm_exit(vcpu, WAIT_EXITS);
624 if (!vcpu->arch.pending_exceptions) {
625 vcpu->arch.wait = 1;
626 kvm_vcpu_block(vcpu);
627
628	/* If we are runnable, then definitely go off to user space to check if any
629 * I/O interrupts are pending.
630 */
631 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
632 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
633 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
634 }
635 }
636
637 return er;
638}
639
640/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
641 * this, if things ever change
642 */
643enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
644{
645 struct mips_coproc *cop0 = vcpu->arch.cop0;
646 enum emulation_result er = EMULATE_FAIL;
647 uint32_t pc = vcpu->arch.pc;
648
649 printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
650 return er;
651}
652
653/* Write Guest TLB Entry @ Index */
654enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
655{
656 struct mips_coproc *cop0 = vcpu->arch.cop0;
657 int index = kvm_read_c0_guest_index(cop0);
658 enum emulation_result er = EMULATE_DONE;
659 struct kvm_mips_tlb *tlb = NULL;
660 uint32_t pc = vcpu->arch.pc;
661
662 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
663 printk("%s: illegal index: %d\n", __func__, index);
664 printk
665 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
666 pc, index, kvm_read_c0_guest_entryhi(cop0),
667 kvm_read_c0_guest_entrylo0(cop0),
668 kvm_read_c0_guest_entrylo1(cop0),
669 kvm_read_c0_guest_pagemask(cop0));
670 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
671 }
672
673 tlb = &vcpu->arch.guest_tlb[index];
674#if 1
675 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
676 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
677#endif
678
679 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
680 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
681 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
682 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
683
684 kvm_debug
685 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
686 pc, index, kvm_read_c0_guest_entryhi(cop0),
687 kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
688 kvm_read_c0_guest_pagemask(cop0));
689
690 return er;
691}
692
693/* Write Guest TLB Entry @ Random Index */
694enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
695{
696 struct mips_coproc *cop0 = vcpu->arch.cop0;
697 enum emulation_result er = EMULATE_DONE;
698 struct kvm_mips_tlb *tlb = NULL;
699 uint32_t pc = vcpu->arch.pc;
700 int index;
701
702#if 1
703 get_random_bytes(&index, sizeof(index));
704 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
705#else
706 index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
707#endif
708
709 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
710 printk("%s: illegal index: %d\n", __func__, index);
711 return EMULATE_FAIL;
712 }
713
714 tlb = &vcpu->arch.guest_tlb[index];
715
716#if 1
717 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
718 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
719#endif
720
721 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
722 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
723 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
724 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
725
726 kvm_debug
727 ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
728 pc, index, kvm_read_c0_guest_entryhi(cop0),
729 kvm_read_c0_guest_entrylo0(cop0),
730 kvm_read_c0_guest_entrylo1(cop0));
731
732 return er;
733}
734
735enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
736{
737 struct mips_coproc *cop0 = vcpu->arch.cop0;
738 long entryhi = kvm_read_c0_guest_entryhi(cop0);
739 enum emulation_result er = EMULATE_DONE;
740 uint32_t pc = vcpu->arch.pc;
741 int index = -1;
742
743 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
744
745 kvm_write_c0_guest_index(cop0, index);
746
747 kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
748 index);
749
750 return er;
751}
752
753enum emulation_result
754kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
755 struct kvm_run *run, struct kvm_vcpu *vcpu)
756{
757 struct mips_coproc *cop0 = vcpu->arch.cop0;
758 enum emulation_result er = EMULATE_DONE;
759 int32_t rt, rd, copz, sel, co_bit, op;
760 uint32_t pc = vcpu->arch.pc;
761 unsigned long curr_pc;
762
763 /*
764 * Update PC and hold onto current PC in case there is
765 * an error and we want to rollback the PC
766 */
767 curr_pc = vcpu->arch.pc;
768 er = update_pc(vcpu, cause);
769 if (er == EMULATE_FAIL) {
770 return er;
771 }
772
773 copz = (inst >> 21) & 0x1f;
774 rt = (inst >> 16) & 0x1f;
775 rd = (inst >> 11) & 0x1f;
776 sel = inst & 0x7;
777 co_bit = (inst >> 25) & 1;
778
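	/*
	 * Field decode example (illustrative): "mtc0 $8, $11, 0", a guest write
	 * of GPR 8 to CP0_Compare, encodes as 0x40885800 and decodes to
	 * co_bit = 0, copz = mtc_op, rt = 8, rd = 11 (MIPS_CP0_COMPARE) and
	 * sel = 0, which lands in the MIPS_CP0_COMPARE case below.
	 */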
779	if (co_bit) {
780 op = (inst) & 0xff;
781
782 switch (op) {
783 case tlbr_op: /* Read indexed TLB entry */
784 er = kvm_mips_emul_tlbr(vcpu);
785 break;
786 case tlbwi_op: /* Write indexed */
787 er = kvm_mips_emul_tlbwi(vcpu);
788 break;
789 case tlbwr_op: /* Write random */
790 er = kvm_mips_emul_tlbwr(vcpu);
791 break;
792 case tlbp_op: /* TLB Probe */
793 er = kvm_mips_emul_tlbp(vcpu);
794 break;
795 case rfe_op:
796 printk("!!!COP0_RFE!!!\n");
797 break;
798 case eret_op:
799 er = kvm_mips_emul_eret(vcpu);
800 goto dont_update_pc;
801 break;
802 case wait_op:
803 er = kvm_mips_emul_wait(vcpu);
804 break;
805 }
806 } else {
807 switch (copz) {
808 case mfc_op:
809#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
810 cop0->stat[rd][sel]++;
811#endif
812 /* Get reg */
813 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
814				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
815			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
816 vcpu->arch.gprs[rt] = 0x0;
817#ifdef CONFIG_KVM_MIPS_DYN_TRANS
818 kvm_mips_trans_mfc0(inst, opc, vcpu);
819#endif
820 }
821 else {
822 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
823
824#ifdef CONFIG_KVM_MIPS_DYN_TRANS
825 kvm_mips_trans_mfc0(inst, opc, vcpu);
826#endif
827 }
828
829 kvm_debug
830 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
831 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
832
833 break;
834
835 case dmfc_op:
836 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
837 break;
838
839 case mtc_op:
840#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
841 cop0->stat[rd][sel]++;
842#endif
843 if ((rd == MIPS_CP0_TLB_INDEX)
844 && (vcpu->arch.gprs[rt] >=
845 KVM_MIPS_GUEST_TLB_SIZE)) {
846 printk("Invalid TLB Index: %ld",
847 vcpu->arch.gprs[rt]);
848 er = EMULATE_FAIL;
849 break;
850 }
851#define C0_EBASE_CORE_MASK 0xff
852 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
853 /* Preserve CORE number */
854 kvm_change_c0_guest_ebase(cop0,
855 ~(C0_EBASE_CORE_MASK),
856 vcpu->arch.gprs[rt]);
857 printk("MTCz, cop0->reg[EBASE]: %#lx\n",
858 kvm_read_c0_guest_ebase(cop0));
859 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
860				uint32_t nasid =
861					vcpu->arch.gprs[rt] & ASID_MASK;
862				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
863				    &&
864				    ((kvm_read_c0_guest_entryhi(cop0) &
865				      ASID_MASK) != nasid)) {
866
867					kvm_debug
868					    ("MTCz, change ASID from %#lx to %#lx\n",
869					     kvm_read_c0_guest_entryhi(cop0) &
870					     ASID_MASK,
871					     vcpu->arch.gprs[rt] & ASID_MASK);
872
873 /* Blow away the shadow host TLBs */
874 kvm_mips_flush_host_tlb(1);
875 }
876 kvm_write_c0_guest_entryhi(cop0,
877 vcpu->arch.gprs[rt]);
878 }
879 /* Are we writing to COUNT */
880 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
881				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
882				goto done;
883 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
884 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
885 pc, kvm_read_c0_guest_compare(cop0),
886 vcpu->arch.gprs[rt]);
887
888 /* If we are writing to COMPARE */
889 /* Clear pending timer interrupt, if any */
890 kvm_mips_callbacks->dequeue_timer_int(vcpu);
891				kvm_mips_write_compare(vcpu,
892						       vcpu->arch.gprs[rt]);
893			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
894 kvm_write_c0_guest_status(cop0,
895 vcpu->arch.gprs[rt]);
896 /* Make sure that CU1 and NMI bits are never set */
897 kvm_clear_c0_guest_status(cop0,
898 (ST0_CU1 | ST0_NMI));
899
900#ifdef CONFIG_KVM_MIPS_DYN_TRANS
901 kvm_mips_trans_mtc0(inst, opc, vcpu);
902#endif
903			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
904 uint32_t old_cause, new_cause;
905 old_cause = kvm_read_c0_guest_cause(cop0);
906 new_cause = vcpu->arch.gprs[rt];
907 /* Update R/W bits */
908 kvm_change_c0_guest_cause(cop0, 0x08800300,
909 new_cause);
910 /* DC bit enabling/disabling timer? */
911 if ((old_cause ^ new_cause) & CAUSEF_DC) {
912 if (new_cause & CAUSEF_DC)
913 kvm_mips_count_disable_cause(vcpu);
914 else
915 kvm_mips_count_enable_cause(vcpu);
916 }
917			} else {
918 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
919#ifdef CONFIG_KVM_MIPS_DYN_TRANS
920 kvm_mips_trans_mtc0(inst, opc, vcpu);
921#endif
922 }
923
924 kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
925 rd, sel, cop0->reg[rd][sel]);
926 break;
927
928 case dmtc_op:
929 printk
930 ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
931 vcpu->arch.pc, rt, rd, sel);
932 er = EMULATE_FAIL;
933 break;
934
935 case mfmcz_op:
936#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
937 cop0->stat[MIPS_CP0_STATUS][0]++;
938#endif
939 if (rt != 0) {
940 vcpu->arch.gprs[rt] =
941 kvm_read_c0_guest_status(cop0);
942 }
943 /* EI */
944 if (inst & 0x20) {
945 kvm_debug("[%#lx] mfmcz_op: EI\n",
946 vcpu->arch.pc);
947 kvm_set_c0_guest_status(cop0, ST0_IE);
948 } else {
949 kvm_debug("[%#lx] mfmcz_op: DI\n",
950 vcpu->arch.pc);
951 kvm_clear_c0_guest_status(cop0, ST0_IE);
952 }
953
954 break;
955
956 case wrpgpr_op:
957 {
958 uint32_t css =
959 cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
960 uint32_t pss =
961 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
962 /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
963 if (css || pss) {
964 er = EMULATE_FAIL;
965 break;
966 }
967 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
968 vcpu->arch.gprs[rt]);
969 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
970 }
971 break;
972 default:
973 printk
974 ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
975 vcpu->arch.pc, copz);
976 er = EMULATE_FAIL;
977 break;
978 }
979 }
980
981done:
982 /*
983 * Rollback PC only if emulation was unsuccessful
984 */
985 if (er == EMULATE_FAIL) {
986 vcpu->arch.pc = curr_pc;
987 }
988
989dont_update_pc:
990 /*
991 * This is for special instructions whose emulation
992 * updates the PC, so do not overwrite the PC under
993 * any circumstances
994 */
995
996 return er;
997}
998
999enum emulation_result
1000kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
1001 struct kvm_run *run, struct kvm_vcpu *vcpu)
1002{
1003 enum emulation_result er = EMULATE_DO_MMIO;
1004 int32_t op, base, rt, offset;
1005 uint32_t bytes;
1006 void *data = run->mmio.data;
1007 unsigned long curr_pc;
1008
1009 /*
1010 * Update PC and hold onto current PC in case there is
1011 * an error and we want to rollback the PC
1012 */
1013 curr_pc = vcpu->arch.pc;
1014 er = update_pc(vcpu, cause);
1015 if (er == EMULATE_FAIL)
1016 return er;
1017
1018 rt = (inst >> 16) & 0x1f;
1019 base = (inst >> 21) & 0x1f;
1020 offset = inst & 0xffff;
1021 op = (inst >> 26) & 0x3f;
1022
1023 switch (op) {
1024 case sb_op:
1025 bytes = 1;
1026 if (bytes > sizeof(run->mmio.data)) {
1027 kvm_err("%s: bad MMIO length: %d\n", __func__,
1028 run->mmio.len);
1029 }
1030 run->mmio.phys_addr =
1031 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1032 host_cp0_badvaddr);
1033 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1034 er = EMULATE_FAIL;
1035 break;
1036 }
1037 run->mmio.len = bytes;
1038 run->mmio.is_write = 1;
1039 vcpu->mmio_needed = 1;
1040 vcpu->mmio_is_write = 1;
1041 *(u8 *) data = vcpu->arch.gprs[rt];
1042 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1043 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1044 *(uint8_t *) data);
1045
1046 break;
1047
1048 case sw_op:
1049 bytes = 4;
1050 if (bytes > sizeof(run->mmio.data)) {
1051 kvm_err("%s: bad MMIO length: %d\n", __func__,
1052 run->mmio.len);
1053 }
1054 run->mmio.phys_addr =
1055 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1056 host_cp0_badvaddr);
1057 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1058 er = EMULATE_FAIL;
1059 break;
1060 }
1061
1062 run->mmio.len = bytes;
1063 run->mmio.is_write = 1;
1064 vcpu->mmio_needed = 1;
1065 vcpu->mmio_is_write = 1;
1066 *(uint32_t *) data = vcpu->arch.gprs[rt];
1067
1068 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1069 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1070 vcpu->arch.gprs[rt], *(uint32_t *) data);
1071 break;
1072
1073 case sh_op:
1074 bytes = 2;
1075 if (bytes > sizeof(run->mmio.data)) {
1076 kvm_err("%s: bad MMIO length: %d\n", __func__,
1077 run->mmio.len);
1078 }
1079 run->mmio.phys_addr =
1080 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1081 host_cp0_badvaddr);
1082 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1083 er = EMULATE_FAIL;
1084 break;
1085 }
1086
1087 run->mmio.len = bytes;
1088 run->mmio.is_write = 1;
1089 vcpu->mmio_needed = 1;
1090 vcpu->mmio_is_write = 1;
1091 *(uint16_t *) data = vcpu->arch.gprs[rt];
1092
1093 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1094 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1095 vcpu->arch.gprs[rt], *(uint32_t *) data);
1096 break;
1097
1098 default:
1099 printk("Store not yet supported");
1100 er = EMULATE_FAIL;
1101 break;
1102 }
1103
1104 /*
1105 * Rollback PC if emulation was unsuccessful
1106 */
1107 if (er == EMULATE_FAIL) {
1108 vcpu->arch.pc = curr_pc;
1109 }
1110
1111 return er;
1112}
1113
1114enum emulation_result
1115kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
1116 struct kvm_run *run, struct kvm_vcpu *vcpu)
1117{
1118 enum emulation_result er = EMULATE_DO_MMIO;
1119 int32_t op, base, rt, offset;
1120 uint32_t bytes;
1121
1122 rt = (inst >> 16) & 0x1f;
1123 base = (inst >> 21) & 0x1f;
1124 offset = inst & 0xffff;
1125 op = (inst >> 26) & 0x3f;
1126
1127 vcpu->arch.pending_load_cause = cause;
1128 vcpu->arch.io_gpr = rt;
1129
1130 switch (op) {
1131 case lw_op:
1132 bytes = 4;
1133 if (bytes > sizeof(run->mmio.data)) {
1134 kvm_err("%s: bad MMIO length: %d\n", __func__,
1135 run->mmio.len);
1136 er = EMULATE_FAIL;
1137 break;
1138 }
1139 run->mmio.phys_addr =
1140 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1141 host_cp0_badvaddr);
1142 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1143 er = EMULATE_FAIL;
1144 break;
1145 }
1146
1147 run->mmio.len = bytes;
1148 run->mmio.is_write = 0;
1149 vcpu->mmio_needed = 1;
1150 vcpu->mmio_is_write = 0;
1151 break;
1152
1153 case lh_op:
1154 case lhu_op:
1155 bytes = 2;
1156 if (bytes > sizeof(run->mmio.data)) {
1157 kvm_err("%s: bad MMIO length: %d\n", __func__,
1158 run->mmio.len);
1159 er = EMULATE_FAIL;
1160 break;
1161 }
1162 run->mmio.phys_addr =
1163 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1164 host_cp0_badvaddr);
1165 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1166 er = EMULATE_FAIL;
1167 break;
1168 }
1169
1170 run->mmio.len = bytes;
1171 run->mmio.is_write = 0;
1172 vcpu->mmio_needed = 1;
1173 vcpu->mmio_is_write = 0;
1174
1175 if (op == lh_op)
1176 vcpu->mmio_needed = 2;
1177 else
1178 vcpu->mmio_needed = 1;
1179
1180 break;
1181
1182 case lbu_op:
1183 case lb_op:
1184 bytes = 1;
1185 if (bytes > sizeof(run->mmio.data)) {
1186 kvm_err("%s: bad MMIO length: %d\n", __func__,
1187 run->mmio.len);
1188 er = EMULATE_FAIL;
1189 break;
1190 }
1191 run->mmio.phys_addr =
1192 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1193 host_cp0_badvaddr);
1194 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1195 er = EMULATE_FAIL;
1196 break;
1197 }
1198
1199 run->mmio.len = bytes;
1200 run->mmio.is_write = 0;
1201 vcpu->mmio_is_write = 0;
1202
1203 if (op == lb_op)
1204 vcpu->mmio_needed = 2;
1205 else
1206 vcpu->mmio_needed = 1;
1207
1208 break;
1209
1210 default:
1211 printk("Load not yet supported");
1212 er = EMULATE_FAIL;
1213 break;
1214 }
1215
1216 return er;
1217}
1218
1219int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
1220{
1221 unsigned long offset = (va & ~PAGE_MASK);
1222 struct kvm *kvm = vcpu->kvm;
1223 unsigned long pa;
1224 gfn_t gfn;
1225 pfn_t pfn;
1226
1227 gfn = va >> PAGE_SHIFT;
1228
1229 if (gfn >= kvm->arch.guest_pmap_npages) {
1230 printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
1231 kvm_mips_dump_host_tlbs();
1232 kvm_arch_vcpu_dump_regs(vcpu);
1233 return -1;
1234 }
1235 pfn = kvm->arch.guest_pmap[gfn];
1236 pa = (pfn << PAGE_SHIFT) | offset;
1237
1238 printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
1239
1240	local_flush_icache_range(CKSEG0ADDR(pa), 32);
1241	return 0;
1242}
1243
1244#define MIPS_CACHE_OP_INDEX_INV 0x0
1245#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
1246#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
1247#define MIPS_CACHE_OP_IMP 0x3
1248#define MIPS_CACHE_OP_HIT_INV 0x4
1249#define MIPS_CACHE_OP_FILL_WB_INV 0x5
1250#define MIPS_CACHE_OP_HIT_HB 0x6
1251#define MIPS_CACHE_OP_FETCH_LOCK 0x7
1252
1253#define MIPS_CACHE_ICACHE 0x0
1254#define MIPS_CACHE_DCACHE 0x1
1255#define MIPS_CACHE_SEC 0x3
1256
1257enum emulation_result
1258kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
1259 struct kvm_run *run, struct kvm_vcpu *vcpu)
1260{
1261 struct mips_coproc *cop0 = vcpu->arch.cop0;
1262 extern void (*r4k_blast_dcache) (void);
1263 extern void (*r4k_blast_icache) (void);
1264 enum emulation_result er = EMULATE_DONE;
1265 int32_t offset, cache, op_inst, op, base;
1266 struct kvm_vcpu_arch *arch = &vcpu->arch;
1267 unsigned long va;
1268 unsigned long curr_pc;
1269
1270 /*
1271 * Update PC and hold onto current PC in case there is
1272 * an error and we want to rollback the PC
1273 */
1274 curr_pc = vcpu->arch.pc;
1275 er = update_pc(vcpu, cause);
1276 if (er == EMULATE_FAIL)
1277 return er;
1278
1279 base = (inst >> 21) & 0x1f;
1280 op_inst = (inst >> 16) & 0x1f;
1281 offset = inst & 0xffff;
1282 cache = (inst >> 16) & 0x3;
1283 op = (inst >> 18) & 0x7;
1284
1285 va = arch->gprs[base] + offset;
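	/*
	 * Decode example (illustrative): "cache 0x15, 0(a0)", Hit_Writeback_Inv
	 * on the D-cache, has op_inst = 0x15, so cache = 1 (MIPS_CACHE_DCACHE),
	 * op = 5 (MIPS_CACHE_OP_FILL_WB_INV) and va = gprs[4] + 0; that
	 * combination is handled by the flush_dcache_line() path further down.
	 */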
1286
1287 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1288 cache, op, base, arch->gprs[base], offset);
1289
1290 /* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate
1291 * the caches entirely by stepping through all the ways/indexes
1292 */
1293 if (op == MIPS_CACHE_OP_INDEX_INV) {
1294 kvm_debug
1295 ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1296 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1297 arch->gprs[base], offset);
1298
1299 if (cache == MIPS_CACHE_DCACHE)
1300 r4k_blast_dcache();
1301 else if (cache == MIPS_CACHE_ICACHE)
1302 r4k_blast_icache();
1303 else {
1304 printk("%s: unsupported CACHE INDEX operation\n",
1305 __func__);
1306 return EMULATE_FAIL;
1307 }
1308
1309#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1310 kvm_mips_trans_cache_index(inst, opc, vcpu);
1311#endif
1312 goto done;
1313 }
1314
1315 preempt_disable();
1316 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1317
1318 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
1319 kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
1320 }
1321 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1322 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1323 int index;
1324
1325 /* If an entry already exists then skip */
1326 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
1327 goto skip_fault;
1328 }
1329
1330 /* If address not in the guest TLB, then give the guest a fault, the
1331 * resulting handler will do the right thing
1332 */
1333 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
1334						(kvm_read_c0_guest_entryhi
1335						 (cop0) & ASID_MASK));
1336
1337 if (index < 0) {
1338 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
1339 vcpu->arch.host_cp0_badvaddr = va;
1340 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1341 vcpu);
1342 preempt_enable();
1343 goto dont_update_pc;
1344 } else {
1345 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1346 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1347 if (!TLB_IS_VALID(*tlb, va)) {
1348 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1349 run, vcpu);
1350 preempt_enable();
1351 goto dont_update_pc;
1352 } else {
1353 /* We fault an entry from the guest tlb to the shadow host TLB */
1354 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1355 NULL,
1356 NULL);
1357 }
1358 }
1359 } else {
1360 printk
1361 ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1362 cache, op, base, arch->gprs[base], offset);
1363 er = EMULATE_FAIL;
1364 preempt_enable();
1365 goto dont_update_pc;
1366
1367 }
1368
1369skip_fault:
1370 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1371 if (cache == MIPS_CACHE_DCACHE
1372 && (op == MIPS_CACHE_OP_FILL_WB_INV
1373 || op == MIPS_CACHE_OP_HIT_INV)) {
1374 flush_dcache_line(va);
1375
1376#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1377 /* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */
1378 kvm_mips_trans_cache_va(inst, opc, vcpu);
1379#endif
1380 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
1381 flush_dcache_line(va);
1382 flush_icache_line(va);
1383
1384#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1385 /* Replace the CACHE instruction, with a SYNCI */
1386 kvm_mips_trans_cache_va(inst, opc, vcpu);
1387#endif
1388 } else {
1389 printk
1390 ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1391 cache, op, base, arch->gprs[base], offset);
1392 er = EMULATE_FAIL;
1393 preempt_enable();
1394 goto dont_update_pc;
1395 }
1396
1397 preempt_enable();
1398
1399 dont_update_pc:
1400 /*
1401 * Rollback PC
1402 */
1403 vcpu->arch.pc = curr_pc;
1404 done:
1405 return er;
1406}
1407
1408enum emulation_result
1409kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1410 struct kvm_run *run, struct kvm_vcpu *vcpu)
1411{
1412 enum emulation_result er = EMULATE_DONE;
1413 uint32_t inst;
1414
1415 /*
1416 * Fetch the instruction.
1417 */
1418 if (cause & CAUSEF_BD) {
1419 opc += 1;
1420 }
1421
1422 inst = kvm_get_inst(opc, vcpu);
1423
1424 switch (((union mips_instruction)inst).r_format.opcode) {
1425 case cop0_op:
1426 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1427 break;
1428 case sb_op:
1429 case sh_op:
1430 case sw_op:
1431 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1432 break;
1433 case lb_op:
1434 case lbu_op:
1435 case lhu_op:
1436 case lh_op:
1437 case lw_op:
1438 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1439 break;
1440
1441 case cache_op:
1442 ++vcpu->stat.cache_exits;
1443 trace_kvm_exit(vcpu, CACHE_EXITS);
1444 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1445 break;
1446
1447 default:
1448 printk("Instruction emulation not supported (%p/%#x)\n", opc,
1449 inst);
1450 kvm_arch_vcpu_dump_regs(vcpu);
1451 er = EMULATE_FAIL;
1452 break;
1453 }
1454
1455 return er;
1456}
1457
1458enum emulation_result
1459kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
1460 struct kvm_run *run, struct kvm_vcpu *vcpu)
1461{
1462 struct mips_coproc *cop0 = vcpu->arch.cop0;
1463 struct kvm_vcpu_arch *arch = &vcpu->arch;
1464 enum emulation_result er = EMULATE_DONE;
1465
1466 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1467 /* save old pc */
1468 kvm_write_c0_guest_epc(cop0, arch->pc);
1469 kvm_set_c0_guest_status(cop0, ST0_EXL);
1470
1471 if (cause & CAUSEF_BD)
1472 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1473 else
1474 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1475
1476 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1477
1478 kvm_change_c0_guest_cause(cop0, (0xff),
1479 (T_SYSCALL << CAUSEB_EXCCODE));
1480
1481 /* Set PC to the exception entry point */
1482 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1483
1484 } else {
1485 printk("Trying to deliver SYSCALL when EXL is already set\n");
1486 er = EMULATE_FAIL;
1487 }
1488
1489 return er;
1490}
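
/*
 * The exception injection helpers below follow the same pattern as
 * kvm_mips_emulate_syscall() above: if the guest is not already in exception
 * mode (Status.EXL clear), save the PC into guest CP0_EPC, set Status.EXL,
 * record Cause.BD when the fault happened in a branch delay slot, set the
 * ExcCode field of guest CP0_Cause, and point the PC at the guest exception
 * vector (KVM_GUEST_KSEG0 + 0x180, or + 0x0 for a TLB refill with EXL clear).
 */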
1491
1492enum emulation_result
1493kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
1494 struct kvm_run *run, struct kvm_vcpu *vcpu)
1495{
1496 struct mips_coproc *cop0 = vcpu->arch.cop0;
1497 struct kvm_vcpu_arch *arch = &vcpu->arch;
1498 enum emulation_result er = EMULATE_DONE;
1499	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1500				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1501
1502 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1503 /* save old pc */
1504 kvm_write_c0_guest_epc(cop0, arch->pc);
1505 kvm_set_c0_guest_status(cop0, ST0_EXL);
1506
1507 if (cause & CAUSEF_BD)
1508 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1509 else
1510 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1511
1512 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1513 arch->pc);
1514
1515 /* set pc to the exception entry point */
1516 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1517
1518 } else {
1519 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1520 arch->pc);
1521
1522 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1523 }
1524
1525 kvm_change_c0_guest_cause(cop0, (0xff),
1526 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1527
1528 /* setup badvaddr, context and entryhi registers for the guest */
1529 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1530 /* XXXKYMA: is the context register used by linux??? */
1531 kvm_write_c0_guest_entryhi(cop0, entryhi);
1532 /* Blow away the shadow host TLBs */
1533 kvm_mips_flush_host_tlb(1);
1534
1535 return er;
1536}
1537
1538enum emulation_result
1539kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
1540 struct kvm_run *run, struct kvm_vcpu *vcpu)
1541{
1542 struct mips_coproc *cop0 = vcpu->arch.cop0;
1543 struct kvm_vcpu_arch *arch = &vcpu->arch;
1544 enum emulation_result er = EMULATE_DONE;
1545 unsigned long entryhi =
1546 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1547		(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1548
1549 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1550 /* save old pc */
1551 kvm_write_c0_guest_epc(cop0, arch->pc);
1552 kvm_set_c0_guest_status(cop0, ST0_EXL);
1553
1554 if (cause & CAUSEF_BD)
1555 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1556 else
1557 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1558
1559 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1560 arch->pc);
1561
1562 /* set pc to the exception entry point */
1563 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1564
1565 } else {
1566 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1567 arch->pc);
1568 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1569 }
1570
1571 kvm_change_c0_guest_cause(cop0, (0xff),
1572 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1573
1574 /* setup badvaddr, context and entryhi registers for the guest */
1575 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1576 /* XXXKYMA: is the context register used by linux??? */
1577 kvm_write_c0_guest_entryhi(cop0, entryhi);
1578 /* Blow away the shadow host TLBs */
1579 kvm_mips_flush_host_tlb(1);
1580
1581 return er;
1582}
1583
1584enum emulation_result
1585kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
1586 struct kvm_run *run, struct kvm_vcpu *vcpu)
1587{
1588 struct mips_coproc *cop0 = vcpu->arch.cop0;
1589 struct kvm_vcpu_arch *arch = &vcpu->arch;
1590 enum emulation_result er = EMULATE_DONE;
1591 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1592			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1593
1594 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1595 /* save old pc */
1596 kvm_write_c0_guest_epc(cop0, arch->pc);
1597 kvm_set_c0_guest_status(cop0, ST0_EXL);
1598
1599 if (cause & CAUSEF_BD)
1600 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1601 else
1602 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1603
1604 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1605 arch->pc);
1606
1607 /* Set PC to the exception entry point */
1608 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1609 } else {
1610 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1611 arch->pc);
1612 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1613 }
1614
1615 kvm_change_c0_guest_cause(cop0, (0xff),
1616 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1617
1618 /* setup badvaddr, context and entryhi registers for the guest */
1619 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1620 /* XXXKYMA: is the context register used by linux??? */
1621 kvm_write_c0_guest_entryhi(cop0, entryhi);
1622 /* Blow away the shadow host TLBs */
1623 kvm_mips_flush_host_tlb(1);
1624
1625 return er;
1626}
1627
1628enum emulation_result
1629kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
1630 struct kvm_run *run, struct kvm_vcpu *vcpu)
1631{
1632 struct mips_coproc *cop0 = vcpu->arch.cop0;
1633 struct kvm_vcpu_arch *arch = &vcpu->arch;
1634 enum emulation_result er = EMULATE_DONE;
1635 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1636			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1637
1638 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1639 /* save old pc */
1640 kvm_write_c0_guest_epc(cop0, arch->pc);
1641 kvm_set_c0_guest_status(cop0, ST0_EXL);
1642
1643 if (cause & CAUSEF_BD)
1644 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1645 else
1646 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1647
1648 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1649 arch->pc);
1650
1651 /* Set PC to the exception entry point */
1652 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1653 } else {
1654 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1655 arch->pc);
1656 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1657 }
1658
1659 kvm_change_c0_guest_cause(cop0, (0xff),
1660 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1661
1662 /* setup badvaddr, context and entryhi registers for the guest */
1663 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1664 /* XXXKYMA: is the context register used by linux??? */
1665 kvm_write_c0_guest_entryhi(cop0, entryhi);
1666 /* Blow away the shadow host TLBs */
1667 kvm_mips_flush_host_tlb(1);
1668
1669 return er;
1670}
1671
1672/* TLBMOD: store into address matching TLB with Dirty bit off */
1673enum emulation_result
1674kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1675 struct kvm_run *run, struct kvm_vcpu *vcpu)
1676{
1677 enum emulation_result er = EMULATE_DONE;
1678
1679#ifdef DEBUG
	/* Declarations needed for the DEBUG-only block below to compile */
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
				(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
	int index;

1680	/*
1681	 * If address not in the guest TLB, then we are in trouble
1682	 */
1683	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1684	if (index < 0) {
1685		/* XXXKYMA Invalidate and retry */
1686		kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1687		kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1688			__func__, entryhi);
1689		kvm_mips_dump_guest_tlbs(vcpu);
1690		kvm_mips_dump_host_tlbs();
1691		return EMULATE_FAIL;
1692	}
1693#endif
1694
1695 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1696 return er;
1697}
1698
1699enum emulation_result
1700kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1701 struct kvm_run *run, struct kvm_vcpu *vcpu)
1702{
1703 struct mips_coproc *cop0 = vcpu->arch.cop0;
1704 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1705			(kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1706	struct kvm_vcpu_arch *arch = &vcpu->arch;
1707 enum emulation_result er = EMULATE_DONE;
1708
1709 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1710 /* save old pc */
1711 kvm_write_c0_guest_epc(cop0, arch->pc);
1712 kvm_set_c0_guest_status(cop0, ST0_EXL);
1713
1714 if (cause & CAUSEF_BD)
1715 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1716 else
1717 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1718
1719 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1720 arch->pc);
1721
1722 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1723 } else {
1724 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1725 arch->pc);
1726 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1727 }
1728
1729 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
1730
1731 /* setup badvaddr, context and entryhi registers for the guest */
1732 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1733 /* XXXKYMA: is the context register used by linux??? */
1734 kvm_write_c0_guest_entryhi(cop0, entryhi);
1735 /* Blow away the shadow host TLBs */
1736 kvm_mips_flush_host_tlb(1);
1737
1738 return er;
1739}
1740
1741enum emulation_result
1742kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1743 struct kvm_run *run, struct kvm_vcpu *vcpu)
1744{
1745 struct mips_coproc *cop0 = vcpu->arch.cop0;
1746 struct kvm_vcpu_arch *arch = &vcpu->arch;
1747 enum emulation_result er = EMULATE_DONE;
1748
1749 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1750 /* save old pc */
1751 kvm_write_c0_guest_epc(cop0, arch->pc);
1752 kvm_set_c0_guest_status(cop0, ST0_EXL);
1753
1754 if (cause & CAUSEF_BD)
1755 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1756 else
1757 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1758
1759 }
1760
1761 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1762
1763 kvm_change_c0_guest_cause(cop0, (0xff),
1764 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1765 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1766
1767 return er;
1768}
1769
1770enum emulation_result
1771kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1772 struct kvm_run *run, struct kvm_vcpu *vcpu)
1773{
1774 struct mips_coproc *cop0 = vcpu->arch.cop0;
1775 struct kvm_vcpu_arch *arch = &vcpu->arch;
1776 enum emulation_result er = EMULATE_DONE;
1777
1778 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1779 /* save old pc */
1780 kvm_write_c0_guest_epc(cop0, arch->pc);
1781 kvm_set_c0_guest_status(cop0, ST0_EXL);
1782
1783 if (cause & CAUSEF_BD)
1784 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1785 else
1786 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1787
1788 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1789
1790 kvm_change_c0_guest_cause(cop0, (0xff),
1791 (T_RES_INST << CAUSEB_EXCCODE));
1792
1793 /* Set PC to the exception entry point */
1794 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1795
1796 } else {
1797 kvm_err("Trying to deliver RI when EXL is already set\n");
1798 er = EMULATE_FAIL;
1799 }
1800
1801 return er;
1802}
1803
1804enum emulation_result
1805kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1806 struct kvm_run *run, struct kvm_vcpu *vcpu)
1807{
1808 struct mips_coproc *cop0 = vcpu->arch.cop0;
1809 struct kvm_vcpu_arch *arch = &vcpu->arch;
1810 enum emulation_result er = EMULATE_DONE;
1811
1812 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1813 /* save old pc */
1814 kvm_write_c0_guest_epc(cop0, arch->pc);
1815 kvm_set_c0_guest_status(cop0, ST0_EXL);
1816
1817 if (cause & CAUSEF_BD)
1818 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1819 else
1820 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1821
1822 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1823
1824 kvm_change_c0_guest_cause(cop0, (0xff),
1825 (T_BREAK << CAUSEB_EXCCODE));
1826
1827 /* Set PC to the exception entry point */
1828 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1829
1830 } else {
1831 printk("Trying to deliver BP when EXL is already set\n");
1832 er = EMULATE_FAIL;
1833 }
1834
1835 return er;
1836}
1837
1838/*
1839 * ll/sc, rdhwr, sync emulation
1840 */
1841
1842#define OPCODE 0xfc000000
1843#define BASE 0x03e00000
1844#define RT 0x001f0000
1845#define OFFSET 0x0000ffff
1846#define LL 0xc0000000
1847#define SC 0xe0000000
1848#define SPEC0 0x00000000
1849#define SPEC3 0x7c000000
1850#define RD 0x0000f800
1851#define FUNC 0x0000003f
1852#define SYNC 0x0000000f
1853#define RDHWR 0x0000003b
1854
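/*
 * Encoding example (illustrative): the TLS read "rdhwr $3, $29" used by Linux
 * userland for the thread pointer assembles to 0x7c03e83b, so
 * (inst & OPCODE) == SPEC3 and (inst & FUNC) == RDHWR, with rd = 29 (the
 * UserLocal register emulated below) and rt = 3.
 */
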
1855enum emulation_result
1856kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1857 struct kvm_run *run, struct kvm_vcpu *vcpu)
1858{
1859 struct mips_coproc *cop0 = vcpu->arch.cop0;
1860 struct kvm_vcpu_arch *arch = &vcpu->arch;
1861 enum emulation_result er = EMULATE_DONE;
1862 unsigned long curr_pc;
1863 uint32_t inst;
1864
1865 /*
1866 * Update PC and hold onto current PC in case there is
1867 * an error and we want to rollback the PC
1868 */
1869 curr_pc = vcpu->arch.pc;
1870 er = update_pc(vcpu, cause);
1871 if (er == EMULATE_FAIL)
1872 return er;
1873
1874 /*
1875 * Fetch the instruction.
1876 */
1877 if (cause & CAUSEF_BD)
1878 opc += 1;
1879
1880 inst = kvm_get_inst(opc, vcpu);
1881
1882 if (inst == KVM_INVALID_INST) {
1883 printk("%s: Cannot get inst @ %p\n", __func__, opc);
1884 return EMULATE_FAIL;
1885 }
1886
1887 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1888		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1889		int rd = (inst & RD) >> 11;
1890		int rt = (inst & RT) >> 16;
1891		/* If usermode, check RDHWR rd is allowed by guest HWREna */
1892		if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
1893			kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
1894				  rd, opc);
1895			goto emulate_ri;
1896		}
1897		switch (rd) {
1898 case 0: /* CPU number */
1899 arch->gprs[rt] = 0;
1900 break;
1901 case 1: /* SYNCI length */
1902 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
1903 current_cpu_data.icache.linesz);
1904 break;
1905 case 2: /* Read count register */
1906			arch->gprs[rt] = kvm_mips_read_count(vcpu);
1907			break;
1908 case 3: /* Count register resolution */
1909 switch (current_cpu_data.cputype) {
1910 case CPU_20KC:
1911 case CPU_25KF:
1912 arch->gprs[rt] = 1;
1913 break;
1914 default:
1915 arch->gprs[rt] = 2;
1916 }
1917 break;
1918 case 29:
Sanjay Lale685c682012-11-21 18:34:04 -08001919 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
Sanjay Lale685c682012-11-21 18:34:04 -08001920 break;
1921
1922 default:
James Hogan15505672014-03-14 13:06:07 +00001923 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
James Hogan26f4f3b2014-03-14 13:06:09 +00001924 goto emulate_ri;
Sanjay Lale685c682012-11-21 18:34:04 -08001925 }
1926 } else {
James Hogan15505672014-03-14 13:06:07 +00001927 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
James Hogan26f4f3b2014-03-14 13:06:09 +00001928 goto emulate_ri;
Sanjay Lale685c682012-11-21 18:34:04 -08001929 }
1930
James Hogan26f4f3b2014-03-14 13:06:09 +00001931 return EMULATE_DONE;
1932
1933emulate_ri:
Sanjay Lale685c682012-11-21 18:34:04 -08001934 /*
James Hogan26f4f3b2014-03-14 13:06:09 +00001935 * Rollback PC (if in branch delay slot then the PC already points to
1936 * branch target), and pass the RI exception to the guest OS.
Sanjay Lale685c682012-11-21 18:34:04 -08001937 */
James Hogan26f4f3b2014-03-14 13:06:09 +00001938 vcpu->arch.pc = curr_pc;
1939 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
Sanjay Lale685c682012-11-21 18:34:04 -08001940}
1941
1942enum emulation_result
1943kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
1944{
1945 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
1946 enum emulation_result er = EMULATE_DONE;
1947 unsigned long curr_pc;
1948
1949 if (run->mmio.len > sizeof(*gpr)) {
1950 printk("Bad MMIO length: %d\n", run->mmio.len);
1951 er = EMULATE_FAIL;
1952 goto done;
1953 }
1954
1955 /*
1956 * Update PC and hold onto current PC in case there is
1957 * an error and we want to rollback the PC
1958 */
1959 curr_pc = vcpu->arch.pc;
1960 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
1961 if (er == EMULATE_FAIL)
1962 return er;
1963
1964 switch (run->mmio.len) {
1965 case 4:
1966 *gpr = *(int32_t *) run->mmio.data;
1967 break;
1968
1969 case 2:
1970 if (vcpu->mmio_needed == 2)
1971 *gpr = *(int16_t *) run->mmio.data;
1972 else
1973 *gpr = *(u16 *) run->mmio.data;
1974
1975 break;
1976 case 1:
1977 if (vcpu->mmio_needed == 2)
1978 *gpr = *(int8_t *) run->mmio.data;
1979 else
1980 *gpr = *(u8 *) run->mmio.data;
1981 break;
1982 }
1983
1984 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
1985 kvm_debug
1986 ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
1987 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
1988 vcpu->mmio_needed);
1989
1990done:
1991 return er;
1992}
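/*
 * Illustrative note on the width handling above: for a 2-byte MMIO load,
 * vcpu->mmio_needed == 2 selects the signed cast, so a bus value of 0xff80
 * is sign-extended to the full GPR width (e.g. 0xffffffffffffff80 on a
 * 64-bit host), whereas the unsigned cast zero-extends it to 0x0000ff80.
 * The 1-byte case behaves the same way with int8_t vs. u8.
 */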
1993
1994static enum emulation_result
1995kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
1996 struct kvm_run *run, struct kvm_vcpu *vcpu)
1997{
1998 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
1999 struct mips_coproc *cop0 = vcpu->arch.cop0;
2000 struct kvm_vcpu_arch *arch = &vcpu->arch;
2001 enum emulation_result er = EMULATE_DONE;
2002
2003 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2004 /* save old pc */
2005 kvm_write_c0_guest_epc(cop0, arch->pc);
2006 kvm_set_c0_guest_status(cop0, ST0_EXL);
2007
2008 if (cause & CAUSEF_BD)
2009 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2010 else
2011 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2012
2013 kvm_change_c0_guest_cause(cop0, (0xff),
2014 (exccode << CAUSEB_EXCCODE));
2015
2016 /* Set PC to the exception entry point */
2017 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2018 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2019
2020 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2021 exccode, kvm_read_c0_guest_epc(cop0),
2022 kvm_read_c0_guest_badvaddr(cop0));
2023 } else {
2024 printk("Trying to deliver EXC when EXL is already set\n");
2025 er = EMULATE_FAIL;
2026 }
2027
2028 return er;
2029}
2030
2031enum emulation_result
2032kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
2033 struct kvm_run *run, struct kvm_vcpu *vcpu)
2034{
2035 enum emulation_result er = EMULATE_DONE;
2036 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2037 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2038
2039 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2040
2041 if (usermode) {
2042 switch (exccode) {
2043 case T_INT:
2044 case T_SYSCALL:
2045 case T_BREAK:
2046 case T_RES_INST:
2047 break;
2048
2049 case T_COP_UNUSABLE:
2050 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2051 er = EMULATE_PRIV_FAIL;
2052 break;
2053
2054 case T_TLB_MOD:
2055 break;
2056
2057 case T_TLB_LD_MISS:
2058 /* If we are accessing Guest kernel space, send an address error exception to the guest */
2059 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2060 printk("%s: LD MISS @ %#lx\n", __func__,
2061 badvaddr);
2062 cause &= ~0xff;
2063 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
2064 er = EMULATE_PRIV_FAIL;
2065 }
2066 break;
2067
2068 case T_TLB_ST_MISS:
2069 /* If we are accessing Guest kernel space, send an address error exception to the guest */
2070 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2071 printk("%s: ST MISS @ %#lx\n", __func__,
2072 badvaddr);
2073 cause &= ~0xff;
2074 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
2075 er = EMULATE_PRIV_FAIL;
2076 }
2077 break;
2078
2079 case T_ADDR_ERR_ST:
2080 printk("%s: address error ST @ %#lx\n", __func__,
2081 badvaddr);
2082 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2083 cause &= ~0xff;
2084 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
2085 }
2086 er = EMULATE_PRIV_FAIL;
2087 break;
2088 case T_ADDR_ERR_LD:
2089 printk("%s: address error LD @ %#lx\n", __func__,
2090 badvaddr);
2091 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2092 cause &= ~0xff;
2093 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
2094 }
2095 er = EMULATE_PRIV_FAIL;
2096 break;
2097 default:
2098 er = EMULATE_PRIV_FAIL;
2099 break;
2100 }
2101 }
2102
2103 if (er == EMULATE_PRIV_FAIL) {
2104 kvm_mips_emulate_exc(cause, opc, run, vcpu);
2105 }
2106 return er;
2107}
2108
2109 /* User Address (UA) fault; this can happen if:
2110 * (1) the TLB entry is not present/valid in either the Guest or the shadow host
2111 * TLB; in this case we pass the fault on to the guest kernel and let it handle it.
2112 * (2) the TLB entry is present in the Guest TLB but not in the shadow host TLB; in
2113 * this case we inject the entry from the Guest TLB into the shadow host TLB.
2114 */
2115enum emulation_result
2116kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
2117 struct kvm_run *run, struct kvm_vcpu *vcpu)
2118{
2119 enum emulation_result er = EMULATE_DONE;
2120 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2121 unsigned long va = vcpu->arch.host_cp0_badvaddr;
2122 int index;
2123
2124 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
2125 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
2126
2127 /* KVM would not have got the exception if this entry was valid in the shadow host TLB.
2128 * Check the Guest TLB; if the entry is not there, send the guest an
2129 * exception. The guest exception handler should then inject an entry into the
2130 * guest TLB.
2131 */
2132 index = kvm_mips_guest_tlb_lookup(vcpu,
2133 (va & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07002134 (kvm_read_c0_guest_entryhi
2135 (vcpu->arch.cop0) & ASID_MASK));
Sanjay Lale685c682012-11-21 18:34:04 -08002136 if (index < 0) {
2137 if (exccode == T_TLB_LD_MISS) {
2138 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2139 } else if (exccode == T_TLB_ST_MISS) {
2140 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2141 } else {
2142 printk("%s: invalid exc code: %d\n", __func__, exccode);
2143 er = EMULATE_FAIL;
2144 }
2145 } else {
2146 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2147
2148 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
2149 if (!TLB_IS_VALID(*tlb, va)) {
2150 if (exccode == T_TLB_LD_MISS) {
2151 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2152 vcpu);
2153 } else if (exccode == T_TLB_ST_MISS) {
2154 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2155 vcpu);
2156 } else {
2157 printk("%s: invalid exc code: %d\n", __func__,
2158 exccode);
2159 er = EMULATE_FAIL;
2160 }
2161 } else {
2162#ifdef DEBUG
2163 kvm_debug
2164 ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2165 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
2166#endif
2167 /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
2168 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
2169 NULL);
2170 }
2171 }
2172
2173 return er;
2174}
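/*
 * Decision flow of kvm_mips_handle_tlbmiss() above, in outline:
 *
 *   look up (badvaddr VPN2 | guest ASID) in the guest TLB
 *     miss          -> deliver a TLB load/store miss exception to the guest
 *                      (kvm_mips_emulate_tlbmiss_ld/st)
 *     hit, invalid  -> deliver a TLB invalid exception to the guest
 *                      (kvm_mips_emulate_tlbinv_ld/st)
 *     hit, valid    -> fill the shadow host TLB from the guest entry
 *                      (kvm_mips_handle_mapped_seg_tlb_fault)
 */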