blob: 65c8dea6d1f5d43fa09892b050369799a4b6ada5 [file] [log] [blame]
Sanjay Lale685c682012-11-21 18:34:04 -08001/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS: Instruction/Exception emulation
7*
8* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9* Authors: Sanjay Lal <sanjayl@kymasys.com>
10*/
11
12#include <linux/errno.h>
13#include <linux/err.h>
James Hogane30492b2014-05-29 10:16:35 +010014#include <linux/ktime.h>
Sanjay Lale685c682012-11-21 18:34:04 -080015#include <linux/kvm_host.h>
16#include <linux/module.h>
17#include <linux/vmalloc.h>
18#include <linux/fs.h>
19#include <linux/bootmem.h>
20#include <linux/random.h>
21#include <asm/page.h>
22#include <asm/cacheflush.h>
23#include <asm/cpu-info.h>
24#include <asm/mmu_context.h>
25#include <asm/tlbflush.h>
26#include <asm/inst.h>
27
28#undef CONFIG_MIPS_MT
29#include <asm/r4kcache.h>
30#define CONFIG_MIPS_MT
31
32#include "kvm_mips_opcode.h"
33#include "kvm_mips_int.h"
34#include "kvm_mips_comm.h"
35
36#include "trace.h"
37
38/*
39 * Compute the return address and do emulate branch simulation, if required.
40 * This function should be called only in branch delay slot active.
41 */
42unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
43 unsigned long instpc)
44{
45 unsigned int dspcontrol;
46 union mips_instruction insn;
47 struct kvm_vcpu_arch *arch = &vcpu->arch;
48 long epc = instpc;
49 long nextpc = KVM_INVALID_INST;
50
51 if (epc & 3)
52 goto unaligned;
53
54 /*
55 * Read the instruction
56 */
57 insn.word = kvm_get_inst((uint32_t *) epc, vcpu);
58
59 if (insn.word == KVM_INVALID_INST)
60 return KVM_INVALID_INST;
61
62 switch (insn.i_format.opcode) {
63 /*
64 * jr and jalr are in r_format format.
65 */
66 case spec_op:
67 switch (insn.r_format.func) {
68 case jalr_op:
69 arch->gprs[insn.r_format.rd] = epc + 8;
70 /* Fall through */
71 case jr_op:
72 nextpc = arch->gprs[insn.r_format.rs];
73 break;
74 }
75 break;
76
77 /*
78 * This group contains:
79 * bltz_op, bgez_op, bltzl_op, bgezl_op,
80 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
81 */
82 case bcond_op:
83 switch (insn.i_format.rt) {
84 case bltz_op:
85 case bltzl_op:
86 if ((long)arch->gprs[insn.i_format.rs] < 0)
87 epc = epc + 4 + (insn.i_format.simmediate << 2);
88 else
89 epc += 8;
90 nextpc = epc;
91 break;
92
93 case bgez_op:
94 case bgezl_op:
95 if ((long)arch->gprs[insn.i_format.rs] >= 0)
96 epc = epc + 4 + (insn.i_format.simmediate << 2);
97 else
98 epc += 8;
99 nextpc = epc;
100 break;
101
102 case bltzal_op:
103 case bltzall_op:
104 arch->gprs[31] = epc + 8;
105 if ((long)arch->gprs[insn.i_format.rs] < 0)
106 epc = epc + 4 + (insn.i_format.simmediate << 2);
107 else
108 epc += 8;
109 nextpc = epc;
110 break;
111
112 case bgezal_op:
113 case bgezall_op:
114 arch->gprs[31] = epc + 8;
115 if ((long)arch->gprs[insn.i_format.rs] >= 0)
116 epc = epc + 4 + (insn.i_format.simmediate << 2);
117 else
118 epc += 8;
119 nextpc = epc;
120 break;
121 case bposge32_op:
122 if (!cpu_has_dsp)
123 goto sigill;
124
125 dspcontrol = rddsp(0x01);
126
127 if (dspcontrol >= 32) {
128 epc = epc + 4 + (insn.i_format.simmediate << 2);
129 } else
130 epc += 8;
131 nextpc = epc;
132 break;
133 }
134 break;
135
136 /*
137 * These are unconditional and in j_format.
138 */
139 case jal_op:
140 arch->gprs[31] = instpc + 8;
141 case j_op:
142 epc += 4;
143 epc >>= 28;
144 epc <<= 28;
145 epc |= (insn.j_format.target << 2);
146 nextpc = epc;
147 break;
148
149 /*
150 * These are conditional and in i_format.
151 */
152 case beq_op:
153 case beql_op:
154 if (arch->gprs[insn.i_format.rs] ==
155 arch->gprs[insn.i_format.rt])
156 epc = epc + 4 + (insn.i_format.simmediate << 2);
157 else
158 epc += 8;
159 nextpc = epc;
160 break;
161
162 case bne_op:
163 case bnel_op:
164 if (arch->gprs[insn.i_format.rs] !=
165 arch->gprs[insn.i_format.rt])
166 epc = epc + 4 + (insn.i_format.simmediate << 2);
167 else
168 epc += 8;
169 nextpc = epc;
170 break;
171
172 case blez_op: /* not really i_format */
173 case blezl_op:
174 /* rt field assumed to be zero */
175 if ((long)arch->gprs[insn.i_format.rs] <= 0)
176 epc = epc + 4 + (insn.i_format.simmediate << 2);
177 else
178 epc += 8;
179 nextpc = epc;
180 break;
181
182 case bgtz_op:
183 case bgtzl_op:
184 /* rt field assumed to be zero */
185 if ((long)arch->gprs[insn.i_format.rs] > 0)
186 epc = epc + 4 + (insn.i_format.simmediate << 2);
187 else
188 epc += 8;
189 nextpc = epc;
190 break;
191
192 /*
193 * And now the FPA/cp1 branch instructions.
194 */
195 case cop1_op:
196 printk("%s: unsupported cop1_op\n", __func__);
197 break;
198 }
199
200 return nextpc;
201
202unaligned:
203 printk("%s: unaligned epc\n", __func__);
204 return nextpc;
205
206sigill:
207 printk("%s: DSP branch but not DSP ASE\n", __func__);
208 return nextpc;
209}
210
211enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
212{
213 unsigned long branch_pc;
214 enum emulation_result er = EMULATE_DONE;
215
216 if (cause & CAUSEF_BD) {
217 branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
218 if (branch_pc == KVM_INVALID_INST) {
219 er = EMULATE_FAIL;
220 } else {
221 vcpu->arch.pc = branch_pc;
222 kvm_debug("BD update_pc(): New PC: %#lx\n", vcpu->arch.pc);
223 }
224 } else
225 vcpu->arch.pc += 4;
226
227 kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);
228
229 return er;
230}
231
James Hogane30492b2014-05-29 10:16:35 +0100232/**
233 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
234 * @vcpu: Virtual CPU.
Sanjay Lale685c682012-11-21 18:34:04 -0800235 *
James Hoganf8239342014-05-29 10:16:37 +0100236 * Returns: 1 if the CP0_Count timer is disabled by either the guest
237 * CP0_Cause.DC bit or the count_ctl.DC bit.
James Hogane30492b2014-05-29 10:16:35 +0100238 * 0 otherwise (in which case CP0_Count timer is running).
Sanjay Lale685c682012-11-21 18:34:04 -0800239 */
James Hogane30492b2014-05-29 10:16:35 +0100240static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -0800241{
242 struct mips_coproc *cop0 = vcpu->arch.cop0;
James Hoganf8239342014-05-29 10:16:37 +0100243 return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
244 (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
James Hogane30492b2014-05-29 10:16:35 +0100245}
Sanjay Lale685c682012-11-21 18:34:04 -0800246
James Hogane30492b2014-05-29 10:16:35 +0100247/**
248 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
249 *
250 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
251 *
252 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
253 */
254static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
255{
256 s64 now_ns, periods;
257 u64 delta;
258
259 now_ns = ktime_to_ns(now);
260 delta = now_ns + vcpu->arch.count_dyn_bias;
261
262 if (delta >= vcpu->arch.count_period) {
263 /* If delta is out of safe range the bias needs adjusting */
264 periods = div64_s64(now_ns, vcpu->arch.count_period);
265 vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
266 /* Recalculate delta with new bias */
267 delta = now_ns + vcpu->arch.count_dyn_bias;
Sanjay Lale685c682012-11-21 18:34:04 -0800268 }
269
James Hogane30492b2014-05-29 10:16:35 +0100270 /*
271 * We've ensured that:
272 * delta < count_period
273 *
274 * Therefore the intermediate delta*count_hz will never overflow since
275 * at the boundary condition:
276 * delta = count_period
277 * delta = NSEC_PER_SEC * 2^32 / count_hz
278 * delta * count_hz = NSEC_PER_SEC * 2^32
279 */
280 return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
281}
282
283/**
James Hoganf8239342014-05-29 10:16:37 +0100284 * kvm_mips_count_time() - Get effective current time.
285 * @vcpu: Virtual CPU.
286 *
287 * Get effective monotonic ktime. This is usually a straightforward ktime_get(),
288 * except when the master disable bit is set in count_ctl, in which case it is
289 * count_resume, i.e. the time that the count was disabled.
290 *
291 * Returns: Effective monotonic ktime for CP0_Count.
292 */
293static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
294{
295 if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
296 return vcpu->arch.count_resume;
297
298 return ktime_get();
299}
300
301/**
James Hogane30492b2014-05-29 10:16:35 +0100302 * kvm_mips_read_count_running() - Read the current count value as if running.
303 * @vcpu: Virtual CPU.
304 * @now: Kernel time to read CP0_Count at.
305 *
306 * Returns the current guest CP0_Count register at time @now and handles if the
307 * timer interrupt is pending and hasn't been handled yet.
308 *
309 * Returns: The current value of the guest CP0_Count register.
310 */
311static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
312{
313 ktime_t expires;
314 int running;
315
316 /* Is the hrtimer pending? */
317 expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
318 if (ktime_compare(now, expires) >= 0) {
319 /*
320 * Cancel it while we handle it so there's no chance of
321 * interference with the timeout handler.
322 */
323 running = hrtimer_cancel(&vcpu->arch.comparecount_timer);
324
325 /* Nothing should be waiting on the timeout */
326 kvm_mips_callbacks->queue_timer_int(vcpu);
327
328 /*
329 * Restart the timer if it was running based on the expiry time
330 * we read, so that we don't push it back 2 periods.
331 */
332 if (running) {
333 expires = ktime_add_ns(expires,
334 vcpu->arch.count_period);
335 hrtimer_start(&vcpu->arch.comparecount_timer, expires,
336 HRTIMER_MODE_ABS);
337 }
338 }
339
340 /* Return the biased and scaled guest CP0_Count */
341 return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
342}
343
344/**
345 * kvm_mips_read_count() - Read the current count value.
346 * @vcpu: Virtual CPU.
347 *
348 * Read the current guest CP0_Count value, taking into account whether the timer
349 * is stopped.
350 *
351 * Returns: The current guest CP0_Count value.
352 */
353uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
354{
355 struct mips_coproc *cop0 = vcpu->arch.cop0;
356
357 /* If count disabled just read static copy of count */
358 if (kvm_mips_count_disabled(vcpu))
359 return kvm_read_c0_guest_count(cop0);
360
361 return kvm_mips_read_count_running(vcpu, ktime_get());
362}
363
364/**
365 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
366 * @vcpu: Virtual CPU.
367 * @count: Output pointer for CP0_Count value at point of freeze.
368 *
369 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
370 * at the point it was frozen. It is guaranteed that any pending interrupts at
371 * the point it was frozen are handled, and none after that point.
372 *
373 * This is useful where the time/CP0_Count is needed in the calculation of the
374 * new parameters.
375 *
376 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
377 *
378 * Returns: The ktime at the point of freeze.
379 */
380static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
381 uint32_t *count)
382{
383 ktime_t now;
384
385 /* stop hrtimer before finding time */
386 hrtimer_cancel(&vcpu->arch.comparecount_timer);
387 now = ktime_get();
388
389 /* find count at this point and handle pending hrtimer */
390 *count = kvm_mips_read_count_running(vcpu, now);
391
392 return now;
393}
394
395
396/**
397 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
398 * @vcpu: Virtual CPU.
399 * @now: ktime at point of resume.
400 * @count: CP0_Count at point of resume.
401 *
402 * Resumes the timer and updates the timer expiry based on @now and @count.
403 * This can be used in conjunction with kvm_mips_freeze_timer() when timer
404 * parameters need to be changed.
405 *
406 * It is guaranteed that a timer interrupt immediately after resume will be
407 * handled, but not if CP_Compare is exactly at @count. That case is already
408 * handled by kvm_mips_freeze_timer().
409 *
410 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
411 */
412static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
413 ktime_t now, uint32_t count)
414{
415 struct mips_coproc *cop0 = vcpu->arch.cop0;
416 uint32_t compare;
417 u64 delta;
418 ktime_t expire;
419
420 /* Calculate timeout (wrap 0 to 2^32) */
421 compare = kvm_read_c0_guest_compare(cop0);
422 delta = (u64)(uint32_t)(compare - count - 1) + 1;
423 delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
424 expire = ktime_add_ns(now, delta);
425
426 /* Update hrtimer to use new timeout */
427 hrtimer_cancel(&vcpu->arch.comparecount_timer);
428 hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
429}
430
431/**
432 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
433 * @vcpu: Virtual CPU.
434 *
435 * Recalculates and updates the expiry time of the hrtimer. This can be used
436 * after timer parameters have been altered which do not depend on the time that
437 * the change occurs (in those cases kvm_mips_freeze_hrtimer() and
438 * kvm_mips_resume_hrtimer() are used directly).
439 *
440 * It is guaranteed that no timer interrupts will be lost in the process.
441 *
442 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
443 */
444static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
445{
446 ktime_t now;
447 uint32_t count;
448
449 /*
450 * freeze_hrtimer takes care of a timer interrupts <= count, and
451 * resume_hrtimer the hrtimer takes care of a timer interrupts > count.
452 */
453 now = kvm_mips_freeze_hrtimer(vcpu, &count);
454 kvm_mips_resume_hrtimer(vcpu, now, count);
455}
456
457/**
458 * kvm_mips_write_count() - Modify the count and update timer.
459 * @vcpu: Virtual CPU.
460 * @count: Guest CP0_Count value to set.
461 *
462 * Sets the CP0_Count value and updates the timer accordingly.
463 */
464void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
465{
466 struct mips_coproc *cop0 = vcpu->arch.cop0;
467 ktime_t now;
468
469 /* Calculate bias */
James Hoganf8239342014-05-29 10:16:37 +0100470 now = kvm_mips_count_time(vcpu);
James Hogane30492b2014-05-29 10:16:35 +0100471 vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);
472
473 if (kvm_mips_count_disabled(vcpu))
474 /* The timer's disabled, adjust the static count */
475 kvm_write_c0_guest_count(cop0, count);
476 else
477 /* Update timeout */
478 kvm_mips_resume_hrtimer(vcpu, now, count);
479}
480
481/**
482 * kvm_mips_init_count() - Initialise timer.
483 * @vcpu: Virtual CPU.
484 *
485 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and set
486 * it going if it's enabled.
487 */
488void kvm_mips_init_count(struct kvm_vcpu *vcpu)
489{
490 /* 100 MHz */
491 vcpu->arch.count_hz = 100*1000*1000;
492 vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
493 vcpu->arch.count_hz);
494 vcpu->arch.count_dyn_bias = 0;
495
496 /* Starting at 0 */
497 kvm_mips_write_count(vcpu, 0);
498}
499
500/**
501 * kvm_mips_write_compare() - Modify compare and update timer.
502 * @vcpu: Virtual CPU.
503 * @compare: New CP0_Compare value.
504 *
505 * Update CP0_Compare to a new value and update the timeout.
506 */
507void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
508{
509 struct mips_coproc *cop0 = vcpu->arch.cop0;
510
511 /* if unchanged, must just be an ack */
512 if (kvm_read_c0_guest_compare(cop0) == compare)
513 return;
514
515 /* Update compare */
516 kvm_write_c0_guest_compare(cop0, compare);
517
518 /* Update timeout if count enabled */
519 if (!kvm_mips_count_disabled(vcpu))
520 kvm_mips_update_hrtimer(vcpu);
521}
522
523/**
524 * kvm_mips_count_disable() - Disable count.
525 * @vcpu: Virtual CPU.
526 *
527 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
528 * time will be handled but not after.
529 *
James Hoganf8239342014-05-29 10:16:37 +0100530 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
531 * count_ctl.DC has been set (count disabled).
James Hogane30492b2014-05-29 10:16:35 +0100532 *
533 * Returns: The time that the timer was stopped.
534 */
535static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
536{
537 struct mips_coproc *cop0 = vcpu->arch.cop0;
538 uint32_t count;
539 ktime_t now;
540
541 /* Stop hrtimer */
542 hrtimer_cancel(&vcpu->arch.comparecount_timer);
543
544 /* Set the static count from the dynamic count, handling pending TI */
545 now = ktime_get();
546 count = kvm_mips_read_count_running(vcpu, now);
547 kvm_write_c0_guest_count(cop0, count);
548
549 return now;
550}
551
552/**
553 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
554 * @vcpu: Virtual CPU.
555 *
556 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
James Hoganf8239342014-05-29 10:16:37 +0100557 * before the final stop time will be handled if the timer isn't disabled by
558 * count_ctl.DC, but not after.
James Hogane30492b2014-05-29 10:16:35 +0100559 *
560 * Assumes CP0_Cause.DC is clear (count enabled).
561 */
562void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
563{
564 struct mips_coproc *cop0 = vcpu->arch.cop0;
565
566 kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
James Hoganf8239342014-05-29 10:16:37 +0100567 if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
568 kvm_mips_count_disable(vcpu);
James Hogane30492b2014-05-29 10:16:35 +0100569}
570
571/**
572 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
573 * @vcpu: Virtual CPU.
574 *
575 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
James Hoganf8239342014-05-29 10:16:37 +0100576 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
577 * potentially before even returning, so the caller should be careful with
578 * ordering of CP0_Cause modifications so as not to lose it.
James Hogane30492b2014-05-29 10:16:35 +0100579 *
580 * Assumes CP0_Cause.DC is set (count disabled).
581 */
582void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
583{
584 struct mips_coproc *cop0 = vcpu->arch.cop0;
585 uint32_t count;
586
587 kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);
588
589 /*
590 * Set the dynamic count to match the static count.
James Hoganf8239342014-05-29 10:16:37 +0100591 * This starts the hrtimer if count_ctl.DC allows it.
592 * Otherwise it conveniently updates the biases.
James Hogane30492b2014-05-29 10:16:35 +0100593 */
594 count = kvm_read_c0_guest_count(cop0);
595 kvm_mips_write_count(vcpu, count);
596}
597
598/**
James Hoganf8239342014-05-29 10:16:37 +0100599 * kvm_mips_set_count_ctl() - Update the count control KVM register.
600 * @vcpu: Virtual CPU.
601 * @count_ctl: Count control register new value.
602 *
603 * Set the count control KVM register. The timer is updated accordingly.
604 *
605 * Returns: -EINVAL if reserved bits are set.
606 * 0 on success.
607 */
608int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
609{
610 struct mips_coproc *cop0 = vcpu->arch.cop0;
611 s64 changed = count_ctl ^ vcpu->arch.count_ctl;
612 s64 delta;
613 ktime_t expire, now;
614 uint32_t count, compare;
615
616 /* Only allow defined bits to be changed */
617 if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
618 return -EINVAL;
619
620 /* Apply new value */
621 vcpu->arch.count_ctl = count_ctl;
622
623 /* Master CP0_Count disable */
624 if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
625 /* Is CP0_Cause.DC already disabling CP0_Count? */
626 if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
627 if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
628 /* Just record the current time */
629 vcpu->arch.count_resume = ktime_get();
630 } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
631 /* disable timer and record current time */
632 vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
633 } else {
634 /*
635 * Calculate timeout relative to static count at resume
636 * time (wrap 0 to 2^32).
637 */
638 count = kvm_read_c0_guest_count(cop0);
639 compare = kvm_read_c0_guest_compare(cop0);
640 delta = (u64)(uint32_t)(compare - count - 1) + 1;
641 delta = div_u64(delta * NSEC_PER_SEC,
642 vcpu->arch.count_hz);
643 expire = ktime_add_ns(vcpu->arch.count_resume, delta);
644
645 /* Handle pending interrupt */
646 now = ktime_get();
647 if (ktime_compare(now, expire) >= 0)
648 /* Nothing should be waiting on the timeout */
649 kvm_mips_callbacks->queue_timer_int(vcpu);
650
651 /* Resume hrtimer without changing bias */
652 count = kvm_mips_read_count_running(vcpu, now);
653 kvm_mips_resume_hrtimer(vcpu, now, count);
654 }
655 }
656
657 return 0;
658}
659
660/**
661 * kvm_mips_set_count_resume() - Update the count resume KVM register.
662 * @vcpu: Virtual CPU.
663 * @count_resume: Count resume register new value.
664 *
665 * Set the count resume KVM register.
666 *
667 * Returns: -EINVAL if out of valid range (0..now).
668 * 0 on success.
669 */
670int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
671{
672 /*
673 * It doesn't make sense for the resume time to be in the future, as it
674 * would be possible for the next interrupt to be more than a full
675 * period in the future.
676 */
677 if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
678 return -EINVAL;
679
680 vcpu->arch.count_resume = ns_to_ktime(count_resume);
681 return 0;
682}
683
684/**
James Hogane30492b2014-05-29 10:16:35 +0100685 * kvm_mips_count_timeout() - Push timer forward on timeout.
686 * @vcpu: Virtual CPU.
687 *
688 * Handle an hrtimer event by push the hrtimer forward a period.
689 *
690 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
691 */
692enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
693{
694 /* Add the Count period to the current expiry time */
695 hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
696 vcpu->arch.count_period);
697 return HRTIMER_RESTART;
Sanjay Lale685c682012-11-21 18:34:04 -0800698}
699
700enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
701{
702 struct mips_coproc *cop0 = vcpu->arch.cop0;
703 enum emulation_result er = EMULATE_DONE;
704
705 if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
706 kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
707 kvm_read_c0_guest_epc(cop0));
708 kvm_clear_c0_guest_status(cop0, ST0_EXL);
709 vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);
710
711 } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
712 kvm_clear_c0_guest_status(cop0, ST0_ERL);
713 vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
714 } else {
715 printk("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
716 vcpu->arch.pc);
717 er = EMULATE_FAIL;
718 }
719
720 return er;
721}
722
723enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
724{
725 enum emulation_result er = EMULATE_DONE;
726
727 kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
728 vcpu->arch.pending_exceptions);
729
730 ++vcpu->stat.wait_exits;
731 trace_kvm_exit(vcpu, WAIT_EXITS);
732 if (!vcpu->arch.pending_exceptions) {
733 vcpu->arch.wait = 1;
734 kvm_vcpu_block(vcpu);
735
736 /* We we are runnable, then definitely go off to user space to check if any
737 * I/O interrupts are pending.
738 */
739 if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
740 clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
741 vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
742 }
743 }
744
745 return er;
746}
747
748/* XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that we can catch
749 * this, if things ever change
750 */
751enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
752{
753 struct mips_coproc *cop0 = vcpu->arch.cop0;
754 enum emulation_result er = EMULATE_FAIL;
755 uint32_t pc = vcpu->arch.pc;
756
757 printk("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
758 return er;
759}
760
761/* Write Guest TLB Entry @ Index */
762enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
763{
764 struct mips_coproc *cop0 = vcpu->arch.cop0;
765 int index = kvm_read_c0_guest_index(cop0);
766 enum emulation_result er = EMULATE_DONE;
767 struct kvm_mips_tlb *tlb = NULL;
768 uint32_t pc = vcpu->arch.pc;
769
770 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
771 printk("%s: illegal index: %d\n", __func__, index);
772 printk
773 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
774 pc, index, kvm_read_c0_guest_entryhi(cop0),
775 kvm_read_c0_guest_entrylo0(cop0),
776 kvm_read_c0_guest_entrylo1(cop0),
777 kvm_read_c0_guest_pagemask(cop0));
778 index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
779 }
780
781 tlb = &vcpu->arch.guest_tlb[index];
782#if 1
783 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
784 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
785#endif
786
787 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
788 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
789 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
790 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
791
792 kvm_debug
793 ("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
794 pc, index, kvm_read_c0_guest_entryhi(cop0),
795 kvm_read_c0_guest_entrylo0(cop0), kvm_read_c0_guest_entrylo1(cop0),
796 kvm_read_c0_guest_pagemask(cop0));
797
798 return er;
799}
800
801/* Write Guest TLB Entry @ Random Index */
802enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
803{
804 struct mips_coproc *cop0 = vcpu->arch.cop0;
805 enum emulation_result er = EMULATE_DONE;
806 struct kvm_mips_tlb *tlb = NULL;
807 uint32_t pc = vcpu->arch.pc;
808 int index;
809
810#if 1
811 get_random_bytes(&index, sizeof(index));
812 index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);
813#else
814 index = jiffies % KVM_MIPS_GUEST_TLB_SIZE;
815#endif
816
817 if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
818 printk("%s: illegal index: %d\n", __func__, index);
819 return EMULATE_FAIL;
820 }
821
822 tlb = &vcpu->arch.guest_tlb[index];
823
824#if 1
825 /* Probe the shadow host TLB for the entry being overwritten, if one matches, invalidate it */
826 kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);
827#endif
828
829 tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
830 tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
831 tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
832 tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);
833
834 kvm_debug
835 ("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
836 pc, index, kvm_read_c0_guest_entryhi(cop0),
837 kvm_read_c0_guest_entrylo0(cop0),
838 kvm_read_c0_guest_entrylo1(cop0));
839
840 return er;
841}
842
843enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
844{
845 struct mips_coproc *cop0 = vcpu->arch.cop0;
846 long entryhi = kvm_read_c0_guest_entryhi(cop0);
847 enum emulation_result er = EMULATE_DONE;
848 uint32_t pc = vcpu->arch.pc;
849 int index = -1;
850
851 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
852
853 kvm_write_c0_guest_index(cop0, index);
854
855 kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
856 index);
857
858 return er;
859}
860
861enum emulation_result
862kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc, uint32_t cause,
863 struct kvm_run *run, struct kvm_vcpu *vcpu)
864{
865 struct mips_coproc *cop0 = vcpu->arch.cop0;
866 enum emulation_result er = EMULATE_DONE;
867 int32_t rt, rd, copz, sel, co_bit, op;
868 uint32_t pc = vcpu->arch.pc;
869 unsigned long curr_pc;
870
871 /*
872 * Update PC and hold onto current PC in case there is
873 * an error and we want to rollback the PC
874 */
875 curr_pc = vcpu->arch.pc;
876 er = update_pc(vcpu, cause);
877 if (er == EMULATE_FAIL) {
878 return er;
879 }
880
881 copz = (inst >> 21) & 0x1f;
882 rt = (inst >> 16) & 0x1f;
883 rd = (inst >> 11) & 0x1f;
884 sel = inst & 0x7;
885 co_bit = (inst >> 25) & 1;
886
Sanjay Lale685c682012-11-21 18:34:04 -0800887 if (co_bit) {
888 op = (inst) & 0xff;
889
890 switch (op) {
891 case tlbr_op: /* Read indexed TLB entry */
892 er = kvm_mips_emul_tlbr(vcpu);
893 break;
894 case tlbwi_op: /* Write indexed */
895 er = kvm_mips_emul_tlbwi(vcpu);
896 break;
897 case tlbwr_op: /* Write random */
898 er = kvm_mips_emul_tlbwr(vcpu);
899 break;
900 case tlbp_op: /* TLB Probe */
901 er = kvm_mips_emul_tlbp(vcpu);
902 break;
903 case rfe_op:
904 printk("!!!COP0_RFE!!!\n");
905 break;
906 case eret_op:
907 er = kvm_mips_emul_eret(vcpu);
908 goto dont_update_pc;
909 break;
910 case wait_op:
911 er = kvm_mips_emul_wait(vcpu);
912 break;
913 }
914 } else {
915 switch (copz) {
916 case mfc_op:
917#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
918 cop0->stat[rd][sel]++;
919#endif
920 /* Get reg */
921 if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
James Hogane30492b2014-05-29 10:16:35 +0100922 vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
Sanjay Lale685c682012-11-21 18:34:04 -0800923 } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
924 vcpu->arch.gprs[rt] = 0x0;
925#ifdef CONFIG_KVM_MIPS_DYN_TRANS
926 kvm_mips_trans_mfc0(inst, opc, vcpu);
927#endif
928 }
929 else {
930 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
931
932#ifdef CONFIG_KVM_MIPS_DYN_TRANS
933 kvm_mips_trans_mfc0(inst, opc, vcpu);
934#endif
935 }
936
937 kvm_debug
938 ("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
939 pc, rd, sel, rt, vcpu->arch.gprs[rt]);
940
941 break;
942
943 case dmfc_op:
944 vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
945 break;
946
947 case mtc_op:
948#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
949 cop0->stat[rd][sel]++;
950#endif
951 if ((rd == MIPS_CP0_TLB_INDEX)
952 && (vcpu->arch.gprs[rt] >=
953 KVM_MIPS_GUEST_TLB_SIZE)) {
954 printk("Invalid TLB Index: %ld",
955 vcpu->arch.gprs[rt]);
956 er = EMULATE_FAIL;
957 break;
958 }
959#define C0_EBASE_CORE_MASK 0xff
960 if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
961 /* Preserve CORE number */
962 kvm_change_c0_guest_ebase(cop0,
963 ~(C0_EBASE_CORE_MASK),
964 vcpu->arch.gprs[rt]);
965 printk("MTCz, cop0->reg[EBASE]: %#lx\n",
966 kvm_read_c0_guest_ebase(cop0));
967 } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
David Daney48c4ac92013-05-13 13:56:44 -0700968 uint32_t nasid =
969 vcpu->arch.gprs[rt] & ASID_MASK;
Sanjay Lale685c682012-11-21 18:34:04 -0800970 if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0)
971 &&
David Daney48c4ac92013-05-13 13:56:44 -0700972 ((kvm_read_c0_guest_entryhi(cop0) &
973 ASID_MASK) != nasid)) {
Sanjay Lale685c682012-11-21 18:34:04 -0800974
975 kvm_debug
976 ("MTCz, change ASID from %#lx to %#lx\n",
David Daney48c4ac92013-05-13 13:56:44 -0700977 kvm_read_c0_guest_entryhi(cop0) &
978 ASID_MASK,
979 vcpu->arch.gprs[rt] & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -0800980
981 /* Blow away the shadow host TLBs */
982 kvm_mips_flush_host_tlb(1);
983 }
984 kvm_write_c0_guest_entryhi(cop0,
985 vcpu->arch.gprs[rt]);
986 }
987 /* Are we writing to COUNT */
988 else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
James Hogane30492b2014-05-29 10:16:35 +0100989 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
Sanjay Lale685c682012-11-21 18:34:04 -0800990 goto done;
991 } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
992 kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
993 pc, kvm_read_c0_guest_compare(cop0),
994 vcpu->arch.gprs[rt]);
995
996 /* If we are writing to COMPARE */
997 /* Clear pending timer interrupt, if any */
998 kvm_mips_callbacks->dequeue_timer_int(vcpu);
James Hogane30492b2014-05-29 10:16:35 +0100999 kvm_mips_write_compare(vcpu,
1000 vcpu->arch.gprs[rt]);
Sanjay Lale685c682012-11-21 18:34:04 -08001001 } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1002 kvm_write_c0_guest_status(cop0,
1003 vcpu->arch.gprs[rt]);
1004 /* Make sure that CU1 and NMI bits are never set */
1005 kvm_clear_c0_guest_status(cop0,
1006 (ST0_CU1 | ST0_NMI));
1007
1008#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1009 kvm_mips_trans_mtc0(inst, opc, vcpu);
1010#endif
James Hogane30492b2014-05-29 10:16:35 +01001011 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1012 uint32_t old_cause, new_cause;
1013 old_cause = kvm_read_c0_guest_cause(cop0);
1014 new_cause = vcpu->arch.gprs[rt];
1015 /* Update R/W bits */
1016 kvm_change_c0_guest_cause(cop0, 0x08800300,
1017 new_cause);
1018 /* DC bit enabling/disabling timer? */
1019 if ((old_cause ^ new_cause) & CAUSEF_DC) {
1020 if (new_cause & CAUSEF_DC)
1021 kvm_mips_count_disable_cause(vcpu);
1022 else
1023 kvm_mips_count_enable_cause(vcpu);
1024 }
Sanjay Lale685c682012-11-21 18:34:04 -08001025 } else {
1026 cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
1027#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1028 kvm_mips_trans_mtc0(inst, opc, vcpu);
1029#endif
1030 }
1031
1032 kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
1033 rd, sel, cop0->reg[rd][sel]);
1034 break;
1035
1036 case dmtc_op:
1037 printk
1038 ("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
1039 vcpu->arch.pc, rt, rd, sel);
1040 er = EMULATE_FAIL;
1041 break;
1042
1043 case mfmcz_op:
1044#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
1045 cop0->stat[MIPS_CP0_STATUS][0]++;
1046#endif
1047 if (rt != 0) {
1048 vcpu->arch.gprs[rt] =
1049 kvm_read_c0_guest_status(cop0);
1050 }
1051 /* EI */
1052 if (inst & 0x20) {
1053 kvm_debug("[%#lx] mfmcz_op: EI\n",
1054 vcpu->arch.pc);
1055 kvm_set_c0_guest_status(cop0, ST0_IE);
1056 } else {
1057 kvm_debug("[%#lx] mfmcz_op: DI\n",
1058 vcpu->arch.pc);
1059 kvm_clear_c0_guest_status(cop0, ST0_IE);
1060 }
1061
1062 break;
1063
1064 case wrpgpr_op:
1065 {
1066 uint32_t css =
1067 cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
1068 uint32_t pss =
1069 (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
1070 /* We don't support any shadow register sets, so SRSCtl[PSS] == SRSCtl[CSS] = 0 */
1071 if (css || pss) {
1072 er = EMULATE_FAIL;
1073 break;
1074 }
1075 kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
1076 vcpu->arch.gprs[rt]);
1077 vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
1078 }
1079 break;
1080 default:
1081 printk
1082 ("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
1083 vcpu->arch.pc, copz);
1084 er = EMULATE_FAIL;
1085 break;
1086 }
1087 }
1088
1089done:
1090 /*
1091 * Rollback PC only if emulation was unsuccessful
1092 */
1093 if (er == EMULATE_FAIL) {
1094 vcpu->arch.pc = curr_pc;
1095 }
1096
1097dont_update_pc:
1098 /*
1099 * This is for special instructions whose emulation
1100 * updates the PC, so do not overwrite the PC under
1101 * any circumstances
1102 */
1103
1104 return er;
1105}
1106
1107enum emulation_result
1108kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
1109 struct kvm_run *run, struct kvm_vcpu *vcpu)
1110{
1111 enum emulation_result er = EMULATE_DO_MMIO;
1112 int32_t op, base, rt, offset;
1113 uint32_t bytes;
1114 void *data = run->mmio.data;
1115 unsigned long curr_pc;
1116
1117 /*
1118 * Update PC and hold onto current PC in case there is
1119 * an error and we want to rollback the PC
1120 */
1121 curr_pc = vcpu->arch.pc;
1122 er = update_pc(vcpu, cause);
1123 if (er == EMULATE_FAIL)
1124 return er;
1125
1126 rt = (inst >> 16) & 0x1f;
1127 base = (inst >> 21) & 0x1f;
1128 offset = inst & 0xffff;
1129 op = (inst >> 26) & 0x3f;
1130
1131 switch (op) {
1132 case sb_op:
1133 bytes = 1;
1134 if (bytes > sizeof(run->mmio.data)) {
1135 kvm_err("%s: bad MMIO length: %d\n", __func__,
1136 run->mmio.len);
1137 }
1138 run->mmio.phys_addr =
1139 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1140 host_cp0_badvaddr);
1141 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1142 er = EMULATE_FAIL;
1143 break;
1144 }
1145 run->mmio.len = bytes;
1146 run->mmio.is_write = 1;
1147 vcpu->mmio_needed = 1;
1148 vcpu->mmio_is_write = 1;
1149 *(u8 *) data = vcpu->arch.gprs[rt];
1150 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1151 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1152 *(uint8_t *) data);
1153
1154 break;
1155
1156 case sw_op:
1157 bytes = 4;
1158 if (bytes > sizeof(run->mmio.data)) {
1159 kvm_err("%s: bad MMIO length: %d\n", __func__,
1160 run->mmio.len);
1161 }
1162 run->mmio.phys_addr =
1163 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1164 host_cp0_badvaddr);
1165 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1166 er = EMULATE_FAIL;
1167 break;
1168 }
1169
1170 run->mmio.len = bytes;
1171 run->mmio.is_write = 1;
1172 vcpu->mmio_needed = 1;
1173 vcpu->mmio_is_write = 1;
1174 *(uint32_t *) data = vcpu->arch.gprs[rt];
1175
1176 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1177 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1178 vcpu->arch.gprs[rt], *(uint32_t *) data);
1179 break;
1180
1181 case sh_op:
1182 bytes = 2;
1183 if (bytes > sizeof(run->mmio.data)) {
1184 kvm_err("%s: bad MMIO length: %d\n", __func__,
1185 run->mmio.len);
1186 }
1187 run->mmio.phys_addr =
1188 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1189 host_cp0_badvaddr);
1190 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1191 er = EMULATE_FAIL;
1192 break;
1193 }
1194
1195 run->mmio.len = bytes;
1196 run->mmio.is_write = 1;
1197 vcpu->mmio_needed = 1;
1198 vcpu->mmio_is_write = 1;
1199 *(uint16_t *) data = vcpu->arch.gprs[rt];
1200
1201 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1202 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1203 vcpu->arch.gprs[rt], *(uint32_t *) data);
1204 break;
1205
1206 default:
1207 printk("Store not yet supported");
1208 er = EMULATE_FAIL;
1209 break;
1210 }
1211
1212 /*
1213 * Rollback PC if emulation was unsuccessful
1214 */
1215 if (er == EMULATE_FAIL) {
1216 vcpu->arch.pc = curr_pc;
1217 }
1218
1219 return er;
1220}
1221
1222enum emulation_result
1223kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
1224 struct kvm_run *run, struct kvm_vcpu *vcpu)
1225{
1226 enum emulation_result er = EMULATE_DO_MMIO;
1227 int32_t op, base, rt, offset;
1228 uint32_t bytes;
1229
1230 rt = (inst >> 16) & 0x1f;
1231 base = (inst >> 21) & 0x1f;
1232 offset = inst & 0xffff;
1233 op = (inst >> 26) & 0x3f;
1234
1235 vcpu->arch.pending_load_cause = cause;
1236 vcpu->arch.io_gpr = rt;
1237
1238 switch (op) {
1239 case lw_op:
1240 bytes = 4;
1241 if (bytes > sizeof(run->mmio.data)) {
1242 kvm_err("%s: bad MMIO length: %d\n", __func__,
1243 run->mmio.len);
1244 er = EMULATE_FAIL;
1245 break;
1246 }
1247 run->mmio.phys_addr =
1248 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1249 host_cp0_badvaddr);
1250 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1251 er = EMULATE_FAIL;
1252 break;
1253 }
1254
1255 run->mmio.len = bytes;
1256 run->mmio.is_write = 0;
1257 vcpu->mmio_needed = 1;
1258 vcpu->mmio_is_write = 0;
1259 break;
1260
1261 case lh_op:
1262 case lhu_op:
1263 bytes = 2;
1264 if (bytes > sizeof(run->mmio.data)) {
1265 kvm_err("%s: bad MMIO length: %d\n", __func__,
1266 run->mmio.len);
1267 er = EMULATE_FAIL;
1268 break;
1269 }
1270 run->mmio.phys_addr =
1271 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1272 host_cp0_badvaddr);
1273 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1274 er = EMULATE_FAIL;
1275 break;
1276 }
1277
1278 run->mmio.len = bytes;
1279 run->mmio.is_write = 0;
1280 vcpu->mmio_needed = 1;
1281 vcpu->mmio_is_write = 0;
1282
1283 if (op == lh_op)
1284 vcpu->mmio_needed = 2;
1285 else
1286 vcpu->mmio_needed = 1;
1287
1288 break;
1289
1290 case lbu_op:
1291 case lb_op:
1292 bytes = 1;
1293 if (bytes > sizeof(run->mmio.data)) {
1294 kvm_err("%s: bad MMIO length: %d\n", __func__,
1295 run->mmio.len);
1296 er = EMULATE_FAIL;
1297 break;
1298 }
1299 run->mmio.phys_addr =
1300 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1301 host_cp0_badvaddr);
1302 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1303 er = EMULATE_FAIL;
1304 break;
1305 }
1306
1307 run->mmio.len = bytes;
1308 run->mmio.is_write = 0;
1309 vcpu->mmio_is_write = 0;
1310
1311 if (op == lb_op)
1312 vcpu->mmio_needed = 2;
1313 else
1314 vcpu->mmio_needed = 1;
1315
1316 break;
1317
1318 default:
1319 printk("Load not yet supported");
1320 er = EMULATE_FAIL;
1321 break;
1322 }
1323
1324 return er;
1325}
1326
1327int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
1328{
1329 unsigned long offset = (va & ~PAGE_MASK);
1330 struct kvm *kvm = vcpu->kvm;
1331 unsigned long pa;
1332 gfn_t gfn;
1333 pfn_t pfn;
1334
1335 gfn = va >> PAGE_SHIFT;
1336
1337 if (gfn >= kvm->arch.guest_pmap_npages) {
1338 printk("%s: Invalid gfn: %#llx\n", __func__, gfn);
1339 kvm_mips_dump_host_tlbs();
1340 kvm_arch_vcpu_dump_regs(vcpu);
1341 return -1;
1342 }
1343 pfn = kvm->arch.guest_pmap[gfn];
1344 pa = (pfn << PAGE_SHIFT) | offset;
1345
1346 printk("%s: va: %#lx, unmapped: %#x\n", __func__, va, CKSEG0ADDR(pa));
1347
James Hoganfacaaec2014-05-29 10:16:25 +01001348 local_flush_icache_range(CKSEG0ADDR(pa), 32);
Sanjay Lale685c682012-11-21 18:34:04 -08001349 return 0;
1350}
1351
1352#define MIPS_CACHE_OP_INDEX_INV 0x0
1353#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
1354#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
1355#define MIPS_CACHE_OP_IMP 0x3
1356#define MIPS_CACHE_OP_HIT_INV 0x4
1357#define MIPS_CACHE_OP_FILL_WB_INV 0x5
1358#define MIPS_CACHE_OP_HIT_HB 0x6
1359#define MIPS_CACHE_OP_FETCH_LOCK 0x7
1360
1361#define MIPS_CACHE_ICACHE 0x0
1362#define MIPS_CACHE_DCACHE 0x1
1363#define MIPS_CACHE_SEC 0x3
1364
1365enum emulation_result
1366kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc, uint32_t cause,
1367 struct kvm_run *run, struct kvm_vcpu *vcpu)
1368{
1369 struct mips_coproc *cop0 = vcpu->arch.cop0;
1370 extern void (*r4k_blast_dcache) (void);
1371 extern void (*r4k_blast_icache) (void);
1372 enum emulation_result er = EMULATE_DONE;
1373 int32_t offset, cache, op_inst, op, base;
1374 struct kvm_vcpu_arch *arch = &vcpu->arch;
1375 unsigned long va;
1376 unsigned long curr_pc;
1377
1378 /*
1379 * Update PC and hold onto current PC in case there is
1380 * an error and we want to rollback the PC
1381 */
1382 curr_pc = vcpu->arch.pc;
1383 er = update_pc(vcpu, cause);
1384 if (er == EMULATE_FAIL)
1385 return er;
1386
1387 base = (inst >> 21) & 0x1f;
1388 op_inst = (inst >> 16) & 0x1f;
1389 offset = inst & 0xffff;
1390 cache = (inst >> 16) & 0x3;
1391 op = (inst >> 18) & 0x7;
1392
1393 va = arch->gprs[base] + offset;
1394
1395 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1396 cache, op, base, arch->gprs[base], offset);
1397
1398 /* Treat INDEX_INV as a nop, basically issued by Linux on startup to invalidate
1399 * the caches entirely by stepping through all the ways/indexes
1400 */
1401 if (op == MIPS_CACHE_OP_INDEX_INV) {
1402 kvm_debug
1403 ("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1404 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1405 arch->gprs[base], offset);
1406
1407 if (cache == MIPS_CACHE_DCACHE)
1408 r4k_blast_dcache();
1409 else if (cache == MIPS_CACHE_ICACHE)
1410 r4k_blast_icache();
1411 else {
1412 printk("%s: unsupported CACHE INDEX operation\n",
1413 __func__);
1414 return EMULATE_FAIL;
1415 }
1416
1417#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1418 kvm_mips_trans_cache_index(inst, opc, vcpu);
1419#endif
1420 goto done;
1421 }
1422
1423 preempt_disable();
1424 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
1425
1426 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0) {
1427 kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
1428 }
1429 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1430 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1431 int index;
1432
1433 /* If an entry already exists then skip */
1434 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0) {
1435 goto skip_fault;
1436 }
1437
1438 /* If address not in the guest TLB, then give the guest a fault, the
1439 * resulting handler will do the right thing
1440 */
1441 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001442 (kvm_read_c0_guest_entryhi
1443 (cop0) & ASID_MASK));
Sanjay Lale685c682012-11-21 18:34:04 -08001444
1445 if (index < 0) {
1446 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
1447 vcpu->arch.host_cp0_badvaddr = va;
1448 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1449 vcpu);
1450 preempt_enable();
1451 goto dont_update_pc;
1452 } else {
1453 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
1454 /* Check if the entry is valid, if not then setup a TLB invalid exception to the guest */
1455 if (!TLB_IS_VALID(*tlb, va)) {
1456 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1457 run, vcpu);
1458 preempt_enable();
1459 goto dont_update_pc;
1460 } else {
1461 /* We fault an entry from the guest tlb to the shadow host TLB */
1462 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1463 NULL,
1464 NULL);
1465 }
1466 }
1467 } else {
1468 printk
1469 ("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1470 cache, op, base, arch->gprs[base], offset);
1471 er = EMULATE_FAIL;
1472 preempt_enable();
1473 goto dont_update_pc;
1474
1475 }
1476
1477skip_fault:
1478 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1479 if (cache == MIPS_CACHE_DCACHE
1480 && (op == MIPS_CACHE_OP_FILL_WB_INV
1481 || op == MIPS_CACHE_OP_HIT_INV)) {
1482 flush_dcache_line(va);
1483
1484#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1485 /* Replace the CACHE instruction, with a SYNCI, not the same, but avoids a trap */
1486 kvm_mips_trans_cache_va(inst, opc, vcpu);
1487#endif
1488 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
1489 flush_dcache_line(va);
1490 flush_icache_line(va);
1491
1492#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1493 /* Replace the CACHE instruction, with a SYNCI */
1494 kvm_mips_trans_cache_va(inst, opc, vcpu);
1495#endif
1496 } else {
1497 printk
1498 ("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1499 cache, op, base, arch->gprs[base], offset);
1500 er = EMULATE_FAIL;
1501 preempt_enable();
1502 goto dont_update_pc;
1503 }
1504
1505 preempt_enable();
1506
1507 dont_update_pc:
1508 /*
1509 * Rollback PC
1510 */
1511 vcpu->arch.pc = curr_pc;
1512 done:
1513 return er;
1514}
1515
1516enum emulation_result
1517kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1518 struct kvm_run *run, struct kvm_vcpu *vcpu)
1519{
1520 enum emulation_result er = EMULATE_DONE;
1521 uint32_t inst;
1522
1523 /*
1524 * Fetch the instruction.
1525 */
1526 if (cause & CAUSEF_BD) {
1527 opc += 1;
1528 }
1529
1530 inst = kvm_get_inst(opc, vcpu);
1531
1532 switch (((union mips_instruction)inst).r_format.opcode) {
1533 case cop0_op:
1534 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1535 break;
1536 case sb_op:
1537 case sh_op:
1538 case sw_op:
1539 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1540 break;
1541 case lb_op:
1542 case lbu_op:
1543 case lhu_op:
1544 case lh_op:
1545 case lw_op:
1546 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1547 break;
1548
1549 case cache_op:
1550 ++vcpu->stat.cache_exits;
1551 trace_kvm_exit(vcpu, CACHE_EXITS);
1552 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1553 break;
1554
1555 default:
1556 printk("Instruction emulation not supported (%p/%#x)\n", opc,
1557 inst);
1558 kvm_arch_vcpu_dump_regs(vcpu);
1559 er = EMULATE_FAIL;
1560 break;
1561 }
1562
1563 return er;
1564}
1565
1566enum emulation_result
1567kvm_mips_emulate_syscall(unsigned long cause, uint32_t *opc,
1568 struct kvm_run *run, struct kvm_vcpu *vcpu)
1569{
1570 struct mips_coproc *cop0 = vcpu->arch.cop0;
1571 struct kvm_vcpu_arch *arch = &vcpu->arch;
1572 enum emulation_result er = EMULATE_DONE;
1573
1574 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1575 /* save old pc */
1576 kvm_write_c0_guest_epc(cop0, arch->pc);
1577 kvm_set_c0_guest_status(cop0, ST0_EXL);
1578
1579 if (cause & CAUSEF_BD)
1580 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1581 else
1582 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1583
1584 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1585
1586 kvm_change_c0_guest_cause(cop0, (0xff),
1587 (T_SYSCALL << CAUSEB_EXCCODE));
1588
1589 /* Set PC to the exception entry point */
1590 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1591
1592 } else {
1593 printk("Trying to deliver SYSCALL when EXL is already set\n");
1594 er = EMULATE_FAIL;
1595 }
1596
1597 return er;
1598}
1599
1600enum emulation_result
1601kvm_mips_emulate_tlbmiss_ld(unsigned long cause, uint32_t *opc,
1602 struct kvm_run *run, struct kvm_vcpu *vcpu)
1603{
1604 struct mips_coproc *cop0 = vcpu->arch.cop0;
1605 struct kvm_vcpu_arch *arch = &vcpu->arch;
1606 enum emulation_result er = EMULATE_DONE;
1607 unsigned long entryhi = (vcpu->arch. host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001608 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001609
1610 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1611 /* save old pc */
1612 kvm_write_c0_guest_epc(cop0, arch->pc);
1613 kvm_set_c0_guest_status(cop0, ST0_EXL);
1614
1615 if (cause & CAUSEF_BD)
1616 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1617 else
1618 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1619
1620 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1621 arch->pc);
1622
1623 /* set pc to the exception entry point */
1624 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1625
1626 } else {
1627 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1628 arch->pc);
1629
1630 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1631 }
1632
1633 kvm_change_c0_guest_cause(cop0, (0xff),
1634 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1635
1636 /* setup badvaddr, context and entryhi registers for the guest */
1637 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1638 /* XXXKYMA: is the context register used by linux??? */
1639 kvm_write_c0_guest_entryhi(cop0, entryhi);
1640 /* Blow away the shadow host TLBs */
1641 kvm_mips_flush_host_tlb(1);
1642
1643 return er;
1644}
1645
1646enum emulation_result
1647kvm_mips_emulate_tlbinv_ld(unsigned long cause, uint32_t *opc,
1648 struct kvm_run *run, struct kvm_vcpu *vcpu)
1649{
1650 struct mips_coproc *cop0 = vcpu->arch.cop0;
1651 struct kvm_vcpu_arch *arch = &vcpu->arch;
1652 enum emulation_result er = EMULATE_DONE;
1653 unsigned long entryhi =
1654 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001655 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001656
1657 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1658 /* save old pc */
1659 kvm_write_c0_guest_epc(cop0, arch->pc);
1660 kvm_set_c0_guest_status(cop0, ST0_EXL);
1661
1662 if (cause & CAUSEF_BD)
1663 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1664 else
1665 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1666
1667 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1668 arch->pc);
1669
1670 /* set pc to the exception entry point */
1671 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1672
1673 } else {
1674 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1675 arch->pc);
1676 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1677 }
1678
1679 kvm_change_c0_guest_cause(cop0, (0xff),
1680 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1681
1682 /* setup badvaddr, context and entryhi registers for the guest */
1683 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1684 /* XXXKYMA: is the context register used by linux??? */
1685 kvm_write_c0_guest_entryhi(cop0, entryhi);
1686 /* Blow away the shadow host TLBs */
1687 kvm_mips_flush_host_tlb(1);
1688
1689 return er;
1690}
1691
1692enum emulation_result
1693kvm_mips_emulate_tlbmiss_st(unsigned long cause, uint32_t *opc,
1694 struct kvm_run *run, struct kvm_vcpu *vcpu)
1695{
1696 struct mips_coproc *cop0 = vcpu->arch.cop0;
1697 struct kvm_vcpu_arch *arch = &vcpu->arch;
1698 enum emulation_result er = EMULATE_DONE;
1699 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001700 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001701
1702 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1703 /* save old pc */
1704 kvm_write_c0_guest_epc(cop0, arch->pc);
1705 kvm_set_c0_guest_status(cop0, ST0_EXL);
1706
1707 if (cause & CAUSEF_BD)
1708 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1709 else
1710 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1711
1712 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1713 arch->pc);
1714
1715 /* Set PC to the exception entry point */
1716 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1717 } else {
1718 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1719 arch->pc);
1720 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1721 }
1722
1723 kvm_change_c0_guest_cause(cop0, (0xff),
1724 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1725
1726 /* setup badvaddr, context and entryhi registers for the guest */
1727 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1728 /* XXXKYMA: is the context register used by linux??? */
1729 kvm_write_c0_guest_entryhi(cop0, entryhi);
1730 /* Blow away the shadow host TLBs */
1731 kvm_mips_flush_host_tlb(1);
1732
1733 return er;
1734}
1735
1736enum emulation_result
1737kvm_mips_emulate_tlbinv_st(unsigned long cause, uint32_t *opc,
1738 struct kvm_run *run, struct kvm_vcpu *vcpu)
1739{
1740 struct mips_coproc *cop0 = vcpu->arch.cop0;
1741 struct kvm_vcpu_arch *arch = &vcpu->arch;
1742 enum emulation_result er = EMULATE_DONE;
1743 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001744 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001745
1746 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1747 /* save old pc */
1748 kvm_write_c0_guest_epc(cop0, arch->pc);
1749 kvm_set_c0_guest_status(cop0, ST0_EXL);
1750
1751 if (cause & CAUSEF_BD)
1752 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1753 else
1754 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1755
1756 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1757 arch->pc);
1758
1759 /* Set PC to the exception entry point */
1760 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1761 } else {
1762 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1763 arch->pc);
1764 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1765 }
1766
1767 kvm_change_c0_guest_cause(cop0, (0xff),
1768 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1769
1770 /* setup badvaddr, context and entryhi registers for the guest */
1771 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1772 /* XXXKYMA: is the context register used by linux??? */
1773 kvm_write_c0_guest_entryhi(cop0, entryhi);
1774 /* Blow away the shadow host TLBs */
1775 kvm_mips_flush_host_tlb(1);
1776
1777 return er;
1778}
1779
1780/* TLBMOD: store into address matching TLB with Dirty bit off */
1781enum emulation_result
1782kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1783 struct kvm_run *run, struct kvm_vcpu *vcpu)
1784{
1785 enum emulation_result er = EMULATE_DONE;
1786
1787#ifdef DEBUG
1788 /*
1789 * If address not in the guest TLB, then we are in trouble
1790 */
1791 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1792 if (index < 0) {
1793 /* XXXKYMA Invalidate and retry */
1794 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1795 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1796 __func__, entryhi);
1797 kvm_mips_dump_guest_tlbs(vcpu);
1798 kvm_mips_dump_host_tlbs();
1799 return EMULATE_FAIL;
1800 }
1801#endif
1802
1803 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1804 return er;
1805}
1806
1807enum emulation_result
1808kvm_mips_emulate_tlbmod(unsigned long cause, uint32_t *opc,
1809 struct kvm_run *run, struct kvm_vcpu *vcpu)
1810{
1811 struct mips_coproc *cop0 = vcpu->arch.cop0;
1812 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001813 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001814 struct kvm_vcpu_arch *arch = &vcpu->arch;
1815 enum emulation_result er = EMULATE_DONE;
1816
1817 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1818 /* save old pc */
1819 kvm_write_c0_guest_epc(cop0, arch->pc);
1820 kvm_set_c0_guest_status(cop0, ST0_EXL);
1821
1822 if (cause & CAUSEF_BD)
1823 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1824 else
1825 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1826
1827 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1828 arch->pc);
1829
1830 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1831 } else {
1832 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1833 arch->pc);
1834 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1835 }
1836
1837 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
1838
1839 /* setup badvaddr, context and entryhi registers for the guest */
1840 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1841 /* XXXKYMA: is the context register used by linux??? */
1842 kvm_write_c0_guest_entryhi(cop0, entryhi);
1843 /* Blow away the shadow host TLBs */
1844 kvm_mips_flush_host_tlb(1);
1845
1846 return er;
1847}
1848
1849enum emulation_result
1850kvm_mips_emulate_fpu_exc(unsigned long cause, uint32_t *opc,
1851 struct kvm_run *run, struct kvm_vcpu *vcpu)
1852{
1853 struct mips_coproc *cop0 = vcpu->arch.cop0;
1854 struct kvm_vcpu_arch *arch = &vcpu->arch;
1855 enum emulation_result er = EMULATE_DONE;
1856
1857 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1858 /* save old pc */
1859 kvm_write_c0_guest_epc(cop0, arch->pc);
1860 kvm_set_c0_guest_status(cop0, ST0_EXL);
1861
1862 if (cause & CAUSEF_BD)
1863 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1864 else
1865 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1866
1867 }
1868
1869 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1870
1871 kvm_change_c0_guest_cause(cop0, (0xff),
1872 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
1873 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1874
1875 return er;
1876}
1877
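/*
 * Reflect a Reserved Instruction exception into the guest.  If the guest is
 * already at exception level (Status.EXL set), delivery is refused and
 * EMULATE_FAIL is returned to the caller.
 */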
1878enum emulation_result
1879kvm_mips_emulate_ri_exc(unsigned long cause, uint32_t *opc,
1880 struct kvm_run *run, struct kvm_vcpu *vcpu)
1881{
1882 struct mips_coproc *cop0 = vcpu->arch.cop0;
1883 struct kvm_vcpu_arch *arch = &vcpu->arch;
1884 enum emulation_result er = EMULATE_DONE;
1885
1886 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1887 /* save old pc */
1888 kvm_write_c0_guest_epc(cop0, arch->pc);
1889 kvm_set_c0_guest_status(cop0, ST0_EXL);
1890
1891 if (cause & CAUSEF_BD)
1892 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1893 else
1894 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1895
1896 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1897
1898 kvm_change_c0_guest_cause(cop0, (0xff),
1899 (T_RES_INST << CAUSEB_EXCCODE));
1900
1901 /* Set PC to the exception entry point */
1902 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1903
1904 } else {
1905 kvm_err("Trying to deliver RI when EXL is already set\n");
1906 er = EMULATE_FAIL;
1907 }
1908
1909 return er;
1910}
1911
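/*
 * Reflect a Breakpoint exception (guest 'break' instruction) into the
 * guest, following the same pattern as the RI path above.
 */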
1912enum emulation_result
1913kvm_mips_emulate_bp_exc(unsigned long cause, uint32_t *opc,
1914 struct kvm_run *run, struct kvm_vcpu *vcpu)
1915{
1916 struct mips_coproc *cop0 = vcpu->arch.cop0;
1917 struct kvm_vcpu_arch *arch = &vcpu->arch;
1918 enum emulation_result er = EMULATE_DONE;
1919
1920 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1921 /* save old pc */
1922 kvm_write_c0_guest_epc(cop0, arch->pc);
1923 kvm_set_c0_guest_status(cop0, ST0_EXL);
1924
1925 if (cause & CAUSEF_BD)
1926 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1927 else
1928 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1929
1930 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1931
1932 kvm_change_c0_guest_cause(cop0, (0xff),
1933 (T_BREAK << CAUSEB_EXCCODE));
1934
1935 /* Set PC to the exception entry point */
1936 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1937
1938 } else {
1939 printk("Trying to deliver BP when EXL is already set\n");
1940 er = EMULATE_FAIL;
1941 }
1942
1943 return er;
1944}
1945
1946/*
1947 * ll/sc, rdhwr, sync emulation
1948 */
1949
1950#define OPCODE 0xfc000000
1951#define BASE 0x03e00000
1952#define RT 0x001f0000
1953#define OFFSET 0x0000ffff
1954#define LL 0xc0000000
1955#define SC 0xe0000000
1956#define SPEC0 0x00000000
1957#define SPEC3 0x7c000000
1958#define RD 0x0000f800
1959#define FUNC 0x0000003f
1960#define SYNC 0x0000000f
1961#define RDHWR 0x0000003b
1962
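/*
 * Illustrative decode of an RDHWR encoding using the masks above.  The
 * instruction word is an assumed example, not one captured from a guest:
 *
 *	inst = 0x7c03e83b;		rdhwr v1, $29 (UserLocal)
 *	(inst & OPCODE) == SPEC3	major opcode 0x1f
 *	(inst & FUNC) == RDHWR		function field 0x3b
 *	rd = (inst & RD) >> 11;		29 -> hardware register selector
 *	rt = (inst & RT) >> 16;		3  -> destination GPR (v1)
 */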
1963enum emulation_result
1964kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
1965 struct kvm_run *run, struct kvm_vcpu *vcpu)
1966{
1967 struct mips_coproc *cop0 = vcpu->arch.cop0;
1968 struct kvm_vcpu_arch *arch = &vcpu->arch;
1969 enum emulation_result er = EMULATE_DONE;
1970 unsigned long curr_pc;
1971 uint32_t inst;
1972
1973 /*
1974 * Update PC and hold onto current PC in case there is
1975 * an error and we want to rollback the PC
1976 */
1977 curr_pc = vcpu->arch.pc;
1978 er = update_pc(vcpu, cause);
1979 if (er == EMULATE_FAIL)
1980 return er;
1981
1982 /*
1983 * Fetch the instruction.
1984 */
1985 if (cause & CAUSEF_BD)
1986 opc += 1;
1987
1988 inst = kvm_get_inst(opc, vcpu);
1989
1990 if (inst == KVM_INVALID_INST) {
1991 printk("%s: Cannot get inst @ %p\n", __func__, opc);
1992 return EMULATE_FAIL;
1993 }
1994
1995 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
1996		int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
1997		int rd = (inst & RD) >> 11;
1998		int rt = (inst & RT) >> 16;
1999		/* If usermode, check RDHWR rd is allowed by guest HWREna */
2000 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2001 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2002 rd, opc);
2003 goto emulate_ri;
2004 }
2005		switch (rd) {
2006 case 0: /* CPU number */
2007 arch->gprs[rt] = 0;
2008 break;
2009 case 1: /* SYNCI length */
2010 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2011 current_cpu_data.icache.linesz);
2012 break;
2013 case 2: /* Read count register */
2014			arch->gprs[rt] = kvm_mips_read_count(vcpu);
2015			break;
2016 case 3: /* Count register resolution */
2017 switch (current_cpu_data.cputype) {
2018 case CPU_20KC:
2019 case CPU_25KF:
2020 arch->gprs[rt] = 1;
2021 break;
2022 default:
2023 arch->gprs[rt] = 2;
2024 }
2025 break;
2026 case 29:
2027			arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
2028			break;
2029
2030 default:
2031			kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
2032			goto emulate_ri;
2033		}
2034 } else {
2035		kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
2036		goto emulate_ri;
2037	}
2038
2039	return EMULATE_DONE;
2040
2041emulate_ri:
2042	/*
2043	 * Rollback PC (if in branch delay slot then the PC already points to
2044	 * branch target), and pass the RI exception to the guest OS.
2045	 */
2046	vcpu->arch.pc = curr_pc;
2047 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
2048}
2049
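/*
 * Called once userspace has completed an MMIO read: replay the deferred PC
 * update (pending_load_cause was saved when the load was first emulated),
 * then copy run->mmio.data into the GPR recorded in vcpu->arch.io_gpr,
 * sign- or zero-extending according to vcpu->mmio_needed.
 */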
2050enum emulation_result
2051kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
2052{
2053 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2054 enum emulation_result er = EMULATE_DONE;
2055 unsigned long curr_pc;
2056
2057 if (run->mmio.len > sizeof(*gpr)) {
2058		kvm_err("Bad MMIO length: %d\n", run->mmio.len);
2059 er = EMULATE_FAIL;
2060 goto done;
2061 }
2062
2063 /*
2064 * Update PC and hold onto current PC in case there is
2065 * an error and we want to rollback the PC
2066 */
2067 curr_pc = vcpu->arch.pc;
2068 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
2069 if (er == EMULATE_FAIL)
2070 return er;
2071
2072 switch (run->mmio.len) {
2073 case 4:
2074 *gpr = *(int32_t *) run->mmio.data;
2075 break;
2076
2077 case 2:
2078		if (vcpu->mmio_needed == 2)
2079			*gpr = *(int16_t *) run->mmio.data;
2080		else
2081			*gpr = *(uint16_t *) run->mmio.data;
2082
2083 break;
2084 case 1:
2085 if (vcpu->mmio_needed == 2)
2086 *gpr = *(int8_t *) run->mmio.data;
2087 else
2088 *gpr = *(u8 *) run->mmio.data;
2089 break;
2090 }
2091
2092 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
2093 kvm_debug
2094 ("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2095 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2096 vcpu->mmio_needed);
2097
2098done:
2099 return er;
2100}
2101
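/*
 * Generic exception injection: deliver the ExcCode already encoded in
 * 'cause' to the guest, setting BadVAddr from the host fault address.
 * Used by kvm_mips_check_privilege() when a guest user-mode access must be
 * reflected back into the guest.
 */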
2102static enum emulation_result
2103kvm_mips_emulate_exc(unsigned long cause, uint32_t *opc,
2104 struct kvm_run *run, struct kvm_vcpu *vcpu)
2105{
2106 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2107 struct mips_coproc *cop0 = vcpu->arch.cop0;
2108 struct kvm_vcpu_arch *arch = &vcpu->arch;
2109 enum emulation_result er = EMULATE_DONE;
2110
2111 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2112 /* save old pc */
2113 kvm_write_c0_guest_epc(cop0, arch->pc);
2114 kvm_set_c0_guest_status(cop0, ST0_EXL);
2115
2116 if (cause & CAUSEF_BD)
2117 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2118 else
2119 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2120
2121 kvm_change_c0_guest_cause(cop0, (0xff),
2122 (exccode << CAUSEB_EXCCODE));
2123
2124 /* Set PC to the exception entry point */
2125 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2126 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2127
2128 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2129 exccode, kvm_read_c0_guest_epc(cop0),
2130 kvm_read_c0_guest_badvaddr(cop0));
2131 } else {
2132 printk("Trying to deliver EXC when EXL is already set\n");
2133 er = EMULATE_FAIL;
2134 }
2135
2136 return er;
2137}
2138
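/*
 * Guest privilege check.  When the fault came from guest user mode, a few
 * exception codes (interrupt, syscall, break, reserved instruction, TLB
 * modified) are always allowed through; others fail the check either
 * unconditionally or when they touch guest kernel space, in which case the
 * cause may be rewritten before delivery, e.g.:
 *
 *	T_TLB_LD_MISS on a guest KSEG0 address  ->  T_ADDR_ERR_LD
 *	T_TLB_ST_MISS on a guest KSEG0 address  ->  T_ADDR_ERR_ST
 *	T_ADDR_ERR_ST on the commpage           ->  T_TLB_ST_MISS
 *	T_ADDR_ERR_LD on the commpage           ->  T_TLB_LD_MISS
 *
 * Anything flagged EMULATE_PRIV_FAIL is then delivered to the guest through
 * kvm_mips_emulate_exc().
 */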
2139enum emulation_result
2140kvm_mips_check_privilege(unsigned long cause, uint32_t *opc,
2141 struct kvm_run *run, struct kvm_vcpu *vcpu)
2142{
2143 enum emulation_result er = EMULATE_DONE;
2144 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2145 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2146
2147 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2148
2149 if (usermode) {
2150 switch (exccode) {
2151 case T_INT:
2152 case T_SYSCALL:
2153 case T_BREAK:
2154 case T_RES_INST:
2155 break;
2156
2157 case T_COP_UNUSABLE:
2158 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2159 er = EMULATE_PRIV_FAIL;
2160 break;
2161
2162 case T_TLB_MOD:
2163 break;
2164
2165 case T_TLB_LD_MISS:
2166			/* If we are accessing guest kernel space, send an address error exception to the guest */
2167 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2168 printk("%s: LD MISS @ %#lx\n", __func__,
2169 badvaddr);
2170 cause &= ~0xff;
2171 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
2172 er = EMULATE_PRIV_FAIL;
2173 }
2174 break;
2175
2176 case T_TLB_ST_MISS:
2177			/* If we are accessing guest kernel space, send an address error exception to the guest */
2178 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
2179 printk("%s: ST MISS @ %#lx\n", __func__,
2180 badvaddr);
2181 cause &= ~0xff;
2182 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
2183 er = EMULATE_PRIV_FAIL;
2184 }
2185 break;
2186
2187 case T_ADDR_ERR_ST:
2188 printk("%s: address error ST @ %#lx\n", __func__,
2189 badvaddr);
2190 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2191 cause &= ~0xff;
2192 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
2193 }
2194 er = EMULATE_PRIV_FAIL;
2195 break;
2196 case T_ADDR_ERR_LD:
2197 printk("%s: address error LD @ %#lx\n", __func__,
2198 badvaddr);
2199 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2200 cause &= ~0xff;
2201 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
2202 }
2203 er = EMULATE_PRIV_FAIL;
2204 break;
2205 default:
2206 er = EMULATE_PRIV_FAIL;
2207 break;
2208 }
2209 }
2210
2211 if (er == EMULATE_PRIV_FAIL) {
2212 kvm_mips_emulate_exc(cause, opc, run, vcpu);
2213 }
2214 return er;
2215}
2216
2217/* User Address (UA) fault, this could happen if
2218 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
2219 * case we pass on the fault to the guest kernel and let it handle it.
2220 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
2221 * case we inject the TLB from the Guest TLB into the shadow host TLB
2222 */
2223enum emulation_result
2224kvm_mips_handle_tlbmiss(unsigned long cause, uint32_t *opc,
2225 struct kvm_run *run, struct kvm_vcpu *vcpu)
2226{
2227 enum emulation_result er = EMULATE_DONE;
2228 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2229 unsigned long va = vcpu->arch.host_cp0_badvaddr;
2230 int index;
2231
2232 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
2233 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
2234
2235	/* KVM would not have got the exception if this entry was valid in the
2236	 * shadow host TLB.  Check the Guest TLB; if the entry is not there, send
2237	 * the guest an exception.  The guest exception handler should then inject
2238	 * an entry into the guest TLB.
2239	 */
2240 index = kvm_mips_guest_tlb_lookup(vcpu,
2241 (va & VPN2_MASK) |
2242			  (kvm_read_c0_guest_entryhi
2243			  (vcpu->arch.cop0) & ASID_MASK));
2244	if (index < 0) {
2245 if (exccode == T_TLB_LD_MISS) {
2246 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2247 } else if (exccode == T_TLB_ST_MISS) {
2248 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2249 } else {
2250 printk("%s: invalid exc code: %d\n", __func__, exccode);
2251 er = EMULATE_FAIL;
2252 }
2253 } else {
2254 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2255
2256		/* Check if the entry is valid; if not, set up a TLB invalid exception for the guest */
2257 if (!TLB_IS_VALID(*tlb, va)) {
2258 if (exccode == T_TLB_LD_MISS) {
2259 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2260 vcpu);
2261 } else if (exccode == T_TLB_ST_MISS) {
2262 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2263 vcpu);
2264 } else {
2265 printk("%s: invalid exc code: %d\n", __func__,
2266 exccode);
2267 er = EMULATE_FAIL;
2268 }
2269 } else {
2270#ifdef DEBUG
2271 kvm_debug
2272 ("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2273 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
2274#endif
2275 /* OK we have a Guest TLB entry, now inject it into the shadow host TLB */
2276 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
2277 NULL);
2278 }
2279 }
2280
2281 return er;
2282}