/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "kvm_mips_opcode.h"
#include "kvm_mips_int.h"
#include "kvm_mips_comm.h"

#include "trace.h"

/*
 * Compute the return address and emulate branch simulation, if required.
 * This function should only be called when the guest is in a branch delay
 * slot (i.e. CP0_Cause.BD is set).
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
                                     unsigned long instpc)
{
        unsigned int dspcontrol;
        union mips_instruction insn;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        long epc = instpc;
        long nextpc = KVM_INVALID_INST;

        if (epc & 3)
                goto unaligned;

        /* Read the instruction */
        insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

        if (insn.word == KVM_INVALID_INST)
                return KVM_INVALID_INST;

        switch (insn.i_format.opcode) {
                /* jr and jalr are in r_format format. */
        case spec_op:
                switch (insn.r_format.func) {
                case jalr_op:
                        arch->gprs[insn.r_format.rd] = epc + 8;
                        /* Fall through */
                case jr_op:
                        nextpc = arch->gprs[insn.r_format.rs];
                        break;
                }
                break;

                /*
                 * This group contains:
                 * bltz_op, bgez_op, bltzl_op, bgezl_op,
                 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
                 */
        case bcond_op:
                switch (insn.i_format.rt) {
                case bltz_op:
                case bltzl_op:
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgez_op:
                case bgezl_op:
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bltzal_op:
                case bltzall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] < 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bgezal_op:
                case bgezall_op:
                        arch->gprs[31] = epc + 8;
                        if ((long)arch->gprs[insn.i_format.rs] >= 0)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;

                case bposge32_op:
                        if (!cpu_has_dsp)
                                goto sigill;

                        dspcontrol = rddsp(0x01);

                        if (dspcontrol >= 32)
                                epc = epc + 4 + (insn.i_format.simmediate << 2);
                        else
                                epc += 8;
                        nextpc = epc;
                        break;
                }
                break;

                /* These are unconditional and in j_format. */
        case jal_op:
                arch->gprs[31] = instpc + 8;
                /* Fall through */
        case j_op:
                epc += 4;
                epc >>= 28;
                epc <<= 28;
                epc |= (insn.j_format.target << 2);
                nextpc = epc;
                break;

                /* These are conditional and in i_format. */
        case beq_op:
        case beql_op:
                if (arch->gprs[insn.i_format.rs] ==
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bne_op:
        case bnel_op:
                if (arch->gprs[insn.i_format.rs] !=
                    arch->gprs[insn.i_format.rt])
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case blez_op:   /* not really i_format */
        case blezl_op:
                /* rt field assumed to be zero */
                if ((long)arch->gprs[insn.i_format.rs] <= 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

        case bgtz_op:
        case bgtzl_op:
                /* rt field assumed to be zero */
                if ((long)arch->gprs[insn.i_format.rs] > 0)
                        epc = epc + 4 + (insn.i_format.simmediate << 2);
                else
                        epc += 8;
                nextpc = epc;
                break;

                /* And now the FPA/cp1 branch instructions. */
        case cop1_op:
                kvm_err("%s: unsupported cop1_op\n", __func__);
                break;
        }

        return nextpc;

unaligned:
        kvm_err("%s: unaligned epc\n", __func__);
        return nextpc;

sigill:
        kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
        return nextpc;
}

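/*
 * Worked example of the branch arithmetic above (values illustrative, not
 * from the original source): for a taken beq at epc with simmediate = 16,
 * the target is epc + 4 + (16 << 2) = epc + 68, i.e. the signed offset is
 * scaled by the 4-byte instruction size and applied relative to the delay
 * slot. A not-taken conditional branch resumes at epc + 8, skipping both
 * the branch and its delay slot.
 */
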
enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
        unsigned long branch_pc;
        enum emulation_result er = EMULATE_DONE;

        if (cause & CAUSEF_BD) {
                branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
                if (branch_pc == KVM_INVALID_INST) {
                        er = EMULATE_FAIL;
                } else {
                        vcpu->arch.pc = branch_pc;
                        kvm_debug("BD update_pc(): New PC: %#lx\n",
                                  vcpu->arch.pc);
                }
        } else
                vcpu->arch.pc += 4;

        kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

        return er;
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:       Virtual CPU.
 *
 * Returns:     1 if the CP0_Count timer is disabled by either the guest
 *              CP0_Cause.DC bit or the count_ctl.DC bit.
 *              0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
                (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 * @vcpu:       Virtual CPU.
 * @now:        Kernel monotonic time to scale.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
        s64 now_ns, periods;
        u64 delta;

        now_ns = ktime_to_ns(now);
        delta = now_ns + vcpu->arch.count_dyn_bias;

        if (delta >= vcpu->arch.count_period) {
                /* If delta is out of safe range the bias needs adjusting */
                periods = div64_s64(now_ns, vcpu->arch.count_period);
                vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
                /* Recalculate delta with new bias */
                delta = now_ns + vcpu->arch.count_dyn_bias;
        }

        /*
         * We've ensured that:
         *   delta < count_period
         *
         * Therefore the intermediate delta * count_hz will never overflow,
         * since at the boundary condition:
         *   delta = count_period
         *   delta = NSEC_PER_SEC * 2^32 / count_hz
         *   delta * count_hz = NSEC_PER_SEC * 2^32
         */
        return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}

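/*
 * Worked example of the bound above (illustrative numbers): with the
 * default count_hz = 100 MHz, count_period = 10^9 * 2^32 / 10^8 ns. Any
 * delta below that bound, multiplied by 10^8, stays below
 * 10^9 * 2^32 < 2^62, so the u64 intermediate passed to the final
 * div_u64() cannot wrap.
 */
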
/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:       Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward
 * ktime_get(), except when the master disable bit is set in count_ctl, in
 * which case it is count_resume, i.e. the time that the count was disabled.
 *
 * Returns:     Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
        if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                return vcpu->arch.count_resume;

        return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:       Virtual CPU.
 * @now:        Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now, and handles the
 * case where a timer interrupt has become pending but hasn't been handled
 * yet.
 *
 * Returns:     The current value of the guest CP0_Count register.
 */
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
        ktime_t expires;
        int running;

        /* Is the hrtimer pending? */
        expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
        if (ktime_compare(now, expires) >= 0) {
                /*
                 * Cancel it while we handle it so there's no chance of
                 * interference with the timeout handler.
                 */
                running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

                /* Nothing should be waiting on the timeout */
                kvm_mips_callbacks->queue_timer_int(vcpu);

                /*
                 * Restart the timer if it was running based on the expiry time
                 * we read, so that we don't push it back 2 periods.
                 */
                if (running) {
                        expires = ktime_add_ns(expires,
                                               vcpu->arch.count_period);
                        hrtimer_start(&vcpu->arch.comparecount_timer, expires,
                                      HRTIMER_MODE_ABS);
                }
        }

        /* Return the biased and scaled guest CP0_Count */
        return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:       Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:     The current guest CP0_Count value.
 */
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        /* If count disabled just read static copy of count */
        if (kvm_mips_count_disabled(vcpu))
                return kvm_read_c0_guest_count(cop0);

        return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:       Virtual CPU.
 * @count:      Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:     The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
                                       uint32_t *count)
{
        ktime_t now;

        /* stop hrtimer before finding time */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        now = ktime_get();

        /* find count at this point and handle pending hrtimer */
        *count = kvm_mips_read_count_running(vcpu, now);

        return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:       Virtual CPU.
 * @now:        ktime at point of resume.
 * @count:      CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
                                    ktime_t now, uint32_t count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        uint32_t compare;
        u64 delta;
        ktime_t expire;

        /* Calculate timeout (wrap 0 to 2^32) */
        compare = kvm_read_c0_guest_compare(cop0);
        delta = (u64)(uint32_t)(compare - count - 1) + 1;
        delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
        expire = ktime_add_ns(now, delta);

        /* Update hrtimer to use new timeout */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);
        hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}

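/*
 * Worked example of the wrap arithmetic above (illustrative values): if
 * count = 0xfffffff0 and compare = 0x00000010, then
 * (uint32_t)(compare - count - 1) + 1 = 0x20, i.e. 32 ticks until the next
 * CP0_Compare match. When compare == count the result is a full 2^32 ticks
 * rather than 0, which is why the -1/+1 dance is needed.
 */
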
/**
 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
 * @vcpu:       Virtual CPU.
 *
 * Recalculates and updates the expiry time of the hrtimer. This can be used
 * after timer parameters have been altered in ways that do not depend on the
 * time that the change occurs (in those cases kvm_mips_freeze_hrtimer() and
 * kvm_mips_resume_hrtimer() are used directly).
 *
 * It is guaranteed that no timer interrupts will be lost in the process.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
{
        ktime_t now;
        uint32_t count;

        /*
         * freeze_hrtimer takes care of timer interrupts <= count, and
         * resume_hrtimer takes care of timer interrupts > count.
         */
        now = kvm_mips_freeze_hrtimer(vcpu, &count);
        kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:       Virtual CPU.
 * @count:      Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        ktime_t now;

        /* Calculate bias */
        now = kvm_mips_count_time(vcpu);
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        if (kvm_mips_count_disabled(vcpu))
                /* The timer's disabled, adjust the static count */
                kvm_write_c0_guest_count(cop0, count);
        else
                /* Update timeout */
                kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:       Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
        /* 100 MHz */
        vcpu->arch.count_hz = 100*1000*1000;
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
                                          vcpu->arch.count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Starting at 0 */
        kvm_mips_write_count(vcpu, 0);
}

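/*
 * For the default 100 MHz frequency the period above works out as
 * (10^9 << 32) / 10^8 = 10 * 2^32 ns, i.e. one full 32-bit CP0_Count wrap
 * takes just under 43 seconds of guest time.
 */
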
/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:       Virtual CPU.
 * @count_hz:   Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:     -EINVAL if @count_hz is out of range.
 *              0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int dc;
        ktime_t now;
        u32 count;

        /* ensure the frequency is in a sensible range... */
        if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
                return -EINVAL;
        /* ... and has actually changed */
        if (vcpu->arch.count_hz == count_hz)
                return 0;

        /* Safely freeze timer so we can keep it continuous */
        dc = kvm_mips_count_disabled(vcpu);
        if (dc) {
                now = kvm_mips_count_time(vcpu);
                count = kvm_read_c0_guest_count(cop0);
        } else {
                now = kvm_mips_freeze_hrtimer(vcpu, &count);
        }

        /* Update the frequency */
        vcpu->arch.count_hz = count_hz;
        vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
        vcpu->arch.count_dyn_bias = 0;

        /* Calculate adjusted bias so dynamic count is unchanged */
        vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

        /* Update and resume hrtimer */
        if (!dc)
                kvm_mips_resume_hrtimer(vcpu, now, count);
        return 0;
}

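/*
 * Continuity note: count_bias is recomputed against the new frequency's
 * scaling of the same ktime, so a guest reading CP0_Count immediately
 * before and after the change observes the same value; only the rate at
 * which it advances differs.
 */
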
/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:       Virtual CPU.
 * @compare:    New CP0_Compare value.
 *
 * Update CP0_Compare to a new value and update the timeout.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        /* if unchanged, must just be an ack */
        if (kvm_read_c0_guest_compare(cop0) == compare)
                return;

        /* Update compare */
        kvm_write_c0_guest_compare(cop0, compare);

        /* Update timeout if count enabled */
        if (!kvm_mips_count_disabled(vcpu))
                kvm_mips_update_hrtimer(vcpu);
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:       Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:     The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        uint32_t count;
        ktime_t now;

        /* Stop hrtimer */
        hrtimer_cancel(&vcpu->arch.comparecount_timer);

        /* Set the static count from the dynamic count, handling pending TI */
        now = ktime_get();
        count = kvm_mips_read_count_running(vcpu, now);
        kvm_write_c0_guest_count(cop0, count);

        return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:       Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;

        kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
        if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
                kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:       Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        uint32_t count;

        kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

        /*
         * Set the dynamic count to match the static count.
         * This starts the hrtimer if count_ctl.DC allows it.
         * Otherwise it conveniently updates the biases.
         */
        count = kvm_read_c0_guest_count(cop0);
        kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:       Virtual CPU.
 * @count_ctl:  Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:     -EINVAL if reserved bits are set.
 *              0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        s64 changed = count_ctl ^ vcpu->arch.count_ctl;
        s64 delta;
        ktime_t expire, now;
        uint32_t count, compare;

        /* Only allow defined bits to be changed */
        if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
                return -EINVAL;

        /* Apply new value */
        vcpu->arch.count_ctl = count_ctl;

        /* Master CP0_Count disable */
        if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
                /* Is CP0_Cause.DC already disabling CP0_Count? */
                if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
                        if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
                                /* Just record the current time */
                                vcpu->arch.count_resume = ktime_get();
                } else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
                        /* disable timer and record current time */
                        vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
                } else {
                        /*
                         * Calculate timeout relative to static count at resume
                         * time (wrap 0 to 2^32).
                         */
                        count = kvm_read_c0_guest_count(cop0);
                        compare = kvm_read_c0_guest_compare(cop0);
                        delta = (u64)(uint32_t)(compare - count - 1) + 1;
                        delta = div_u64(delta * NSEC_PER_SEC,
                                        vcpu->arch.count_hz);
                        expire = ktime_add_ns(vcpu->arch.count_resume, delta);

                        /* Handle pending interrupt */
                        now = ktime_get();
                        if (ktime_compare(now, expire) >= 0)
                                /* Nothing should be waiting on the timeout */
                                kvm_mips_callbacks->queue_timer_int(vcpu);

                        /* Resume hrtimer without changing bias */
                        count = kvm_mips_read_count_running(vcpu, now);
                        kvm_mips_resume_hrtimer(vcpu, now, count);
                }
        }

        return 0;
}

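/*
 * Re-enable timeline for the count_ctl.DC case above (a reading of the
 * code, not original documentation): the timer behaves as if CP0_Count had
 * been frozen at count_resume, so the next CP0_Compare match is computed
 * from count_resume plus the remaining ticks; if that instant has already
 * passed by the time DC is cleared, the interrupt is queued immediately
 * before the hrtimer is restarted from the current count.
 */
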
/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:               Virtual CPU.
 * @count_resume:       Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:     -EINVAL if out of valid range (0..now).
 *              0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
        /*
         * It doesn't make sense for the resume time to be in the future, as it
         * would be possible for the next interrupt to be more than a full
         * period in the future.
         */
        if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
                return -EINVAL;

        vcpu->arch.count_resume = ns_to_ktime(count_resume);
        return 0;
}

723/**
James Hogane30492b2014-05-29 10:16:35 +0100724 * kvm_mips_count_timeout() - Push timer forward on timeout.
725 * @vcpu: Virtual CPU.
726 *
727 * Handle an hrtimer event by push the hrtimer forward a period.
728 *
729 * Returns: The hrtimer_restart value to return to the hrtimer subsystem.
730 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
        /* Add the Count period to the current expiry time */
        hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
                               vcpu->arch.count_period);
        return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;

        if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
                kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
                          kvm_read_c0_guest_epc(cop0));
                kvm_clear_c0_guest_status(cop0, ST0_EXL);
                vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

        } else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
                kvm_clear_c0_guest_status(cop0, ST0_ERL);
                vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
        } else {
                kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
                        vcpu->arch.pc);
                er = EMULATE_FAIL;
        }

        return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;

        kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
                  vcpu->arch.pending_exceptions);

        ++vcpu->stat.wait_exits;
        trace_kvm_exit(vcpu, WAIT_EXITS);
        if (!vcpu->arch.pending_exceptions) {
                vcpu->arch.wait = 1;
                kvm_vcpu_block(vcpu);

                /*
                 * If we are runnable, then definitely go off to user space to
                 * check if any I/O interrupts are pending.
                 */
                if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
                }
        }

        return er;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_FAIL;
        uint32_t pc = vcpu->arch.pc;

        kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
        return er;
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        int index = kvm_read_c0_guest_index(cop0);
        enum emulation_result er = EMULATE_DONE;
        struct kvm_mips_tlb *tlb = NULL;
        uint32_t pc = vcpu->arch.pc;

        if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
                kvm_debug("%s: illegal index: %d\n", __func__, index);
                kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                          pc, index, kvm_read_c0_guest_entryhi(cop0),
                          kvm_read_c0_guest_entrylo0(cop0),
                          kvm_read_c0_guest_entrylo1(cop0),
                          kvm_read_c0_guest_pagemask(cop0));
                index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
        }

        tlb = &vcpu->arch.guest_tlb[index];
        /*
         * Probe the shadow host TLB for the entry being overwritten, if one
         * matches, invalidate it
         */
        kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0),
                  kvm_read_c0_guest_pagemask(cop0));

        return er;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        struct kvm_mips_tlb *tlb = NULL;
        uint32_t pc = vcpu->arch.pc;
        int index;

        get_random_bytes(&index, sizeof(index));
        index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

        if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
                kvm_err("%s: illegal index: %d\n", __func__, index);
                return EMULATE_FAIL;
        }

        tlb = &vcpu->arch.guest_tlb[index];

        /*
         * Probe the shadow host TLB for the entry being overwritten, if one
         * matches, invalidate it
         */
        kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

        tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
        tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
        tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
        tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

        kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
                  pc, index, kvm_read_c0_guest_entryhi(cop0),
                  kvm_read_c0_guest_entrylo0(cop0),
                  kvm_read_c0_guest_entrylo1(cop0));

        return er;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        long entryhi = kvm_read_c0_guest_entryhi(cop0);
        enum emulation_result er = EMULATE_DONE;
        uint32_t pc = vcpu->arch.pc;
        int index = -1;

        index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

        kvm_write_c0_guest_index(cop0, index);

        kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
                  index);

        return er;
}

enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
                                           uint32_t cause, struct kvm_run *run,
                                           struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        int32_t rt, rd, copz, sel, co_bit, op;
        uint32_t pc = vcpu->arch.pc;
        unsigned long curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        copz = (inst >> 21) & 0x1f;
        rt = (inst >> 16) & 0x1f;
        rd = (inst >> 11) & 0x1f;
        sel = inst & 0x7;
        co_bit = (inst >> 25) & 1;

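        /*
         * COP0 instruction fields, as decoded above: bits 31:26 are the
         * cop0_op major opcode, bit 25 is the CO bit (set for TLB/ERET
         * style functions encoded in the instruction's low bits), bits
         * 25:21 form copz (mfc0/mtc0/etc.), bits 20:16 rt, bits 15:11 rd,
         * and bits 2:0 the register select.
         */
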
        if (co_bit) {
                op = (inst) & 0xff;

                switch (op) {
                case tlbr_op:   /* Read indexed TLB entry */
                        er = kvm_mips_emul_tlbr(vcpu);
                        break;
                case tlbwi_op:  /* Write indexed */
                        er = kvm_mips_emul_tlbwi(vcpu);
                        break;
                case tlbwr_op:  /* Write random */
                        er = kvm_mips_emul_tlbwr(vcpu);
                        break;
                case tlbp_op:   /* TLB Probe */
                        er = kvm_mips_emul_tlbp(vcpu);
                        break;
                case rfe_op:
                        kvm_err("!!!COP0_RFE!!!\n");
                        break;
                case eret_op:
                        er = kvm_mips_emul_eret(vcpu);
                        goto dont_update_pc;
                        break;
                case wait_op:
                        er = kvm_mips_emul_wait(vcpu);
                        break;
                }
        } else {
                switch (copz) {
                case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        /* Get reg */
                        if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
                        } else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
                                vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        } else {
                                vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
                        }

                        kvm_debug("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
                                  pc, rd, sel, rt, vcpu->arch.gprs[rt]);

                        break;

                case dmfc_op:
                        vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
                        break;

                case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[rd][sel]++;
#endif
                        if ((rd == MIPS_CP0_TLB_INDEX)
                            && (vcpu->arch.gprs[rt] >=
                                KVM_MIPS_GUEST_TLB_SIZE)) {
                                kvm_err("Invalid TLB Index: %ld",
                                        vcpu->arch.gprs[rt]);
                                er = EMULATE_FAIL;
                                break;
                        }
#define C0_EBASE_CORE_MASK 0xff
                        if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
                                /* Preserve CORE number */
                                kvm_change_c0_guest_ebase(cop0,
                                                          ~(C0_EBASE_CORE_MASK),
                                                          vcpu->arch.gprs[rt]);
                                kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
                                        kvm_read_c0_guest_ebase(cop0));
                        } else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
                                uint32_t nasid =
                                        vcpu->arch.gprs[rt] & ASID_MASK;
                                if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
                                    ((kvm_read_c0_guest_entryhi(cop0) &
                                      ASID_MASK) != nasid)) {
                                        kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
                                                  kvm_read_c0_guest_entryhi(cop0)
                                                  & ASID_MASK,
                                                  vcpu->arch.gprs[rt]
                                                  & ASID_MASK);

                                        /* Blow away the shadow host TLBs */
                                        kvm_mips_flush_host_tlb(1);
                                }
                                kvm_write_c0_guest_entryhi(cop0,
                                                           vcpu->arch.gprs[rt]);
                        }
                        /* Are we writing to COUNT */
                        else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
                                kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
                                goto done;
                        } else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
                                kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
                                          pc, kvm_read_c0_guest_compare(cop0),
                                          vcpu->arch.gprs[rt]);

                                /* If we are writing to COMPARE */
                                /* Clear pending timer interrupt, if any */
                                kvm_mips_callbacks->dequeue_timer_int(vcpu);
                                kvm_mips_write_compare(vcpu,
                                                       vcpu->arch.gprs[rt]);
                        } else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
                                kvm_write_c0_guest_status(cop0,
                                                          vcpu->arch.gprs[rt]);
                                /*
                                 * Make sure that CU1 and NMI bits are
                                 * never set
                                 */
                                kvm_clear_c0_guest_status(cop0,
                                                          (ST0_CU1 | ST0_NMI));

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
                                uint32_t old_cause, new_cause;

                                old_cause = kvm_read_c0_guest_cause(cop0);
                                new_cause = vcpu->arch.gprs[rt];
                                /* Update R/W bits */
                                kvm_change_c0_guest_cause(cop0, 0x08800300,
                                                          new_cause);
                                /* DC bit enabling/disabling timer? */
                                if ((old_cause ^ new_cause) & CAUSEF_DC) {
                                        if (new_cause & CAUSEF_DC)
                                                kvm_mips_count_disable_cause(vcpu);
                                        else
                                                kvm_mips_count_enable_cause(vcpu);
                                }
                        } else {
                                cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                                kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
                        }

                        kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
                                  rd, sel, cop0->reg[rd][sel]);
                        break;

                case dmtc_op:
                        kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
                                vcpu->arch.pc, rt, rd, sel);
                        er = EMULATE_FAIL;
                        break;

                case mfmcz_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
                        cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
                        if (rt != 0) {
                                vcpu->arch.gprs[rt] =
                                        kvm_read_c0_guest_status(cop0);
                        }
                        /* EI */
                        if (inst & 0x20) {
                                kvm_debug("[%#lx] mfmcz_op: EI\n",
                                          vcpu->arch.pc);
                                kvm_set_c0_guest_status(cop0, ST0_IE);
                        } else {
                                kvm_debug("[%#lx] mfmcz_op: DI\n",
                                          vcpu->arch.pc);
                                kvm_clear_c0_guest_status(cop0, ST0_IE);
                        }

                        break;

                case wrpgpr_op:
                        {
                                uint32_t css =
                                        cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
                                uint32_t pss =
                                        (cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
                                /*
                                 * We don't support any shadow register sets, so
                                 * SRSCtl[PSS] == SRSCtl[CSS] = 0
                                 */
                                if (css || pss) {
                                        er = EMULATE_FAIL;
                                        break;
                                }
                                kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
                                          vcpu->arch.gprs[rt]);
                                vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
                        }
                        break;
                default:
                        kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
                                vcpu->arch.pc, copz);
                        er = EMULATE_FAIL;
                        break;
                }
        }

done:
        /* Rollback PC only if emulation was unsuccessful */
        if (er == EMULATE_FAIL)
                vcpu->arch.pc = curr_pc;

dont_update_pc:
        /*
         * This is for special instructions whose emulation
         * updates the PC, so do not overwrite the PC under
         * any circumstances
         */

        return er;
}

enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
                                             struct kvm_run *run,
                                             struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DO_MMIO;
        int32_t op, base, rt, offset;
        uint32_t bytes;
        void *data = run->mmio.data;
        unsigned long curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        rt = (inst >> 16) & 0x1f;
        base = (inst >> 21) & 0x1f;
        offset = inst & 0xffff;
        op = (inst >> 26) & 0x3f;

        switch (op) {
        case sb_op:
                bytes = 1;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                }
                run->mmio.phys_addr =
                        kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                       host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.len = bytes;
                run->mmio.is_write = 1;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 1;
                *(u8 *) data = vcpu->arch.gprs[rt];
                kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                          vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
                          *(uint8_t *) data);

                break;

        case sw_op:
                bytes = 4;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                }
                run->mmio.phys_addr =
                        kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                       host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 1;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 1;
                *(uint32_t *) data = vcpu->arch.gprs[rt];

                kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                          vcpu->arch.gprs[rt], *(uint32_t *) data);
                break;

        case sh_op:
                bytes = 2;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                }
                run->mmio.phys_addr =
                        kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                       host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 1;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 1;
                *(uint16_t *) data = vcpu->arch.gprs[rt];

                kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
                          vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
                          vcpu->arch.gprs[rt], *(uint16_t *) data);
                break;

        default:
                kvm_err("Store not yet supported\n");
                er = EMULATE_FAIL;
                break;
        }

        /* Rollback PC if emulation was unsuccessful */
        if (er == EMULATE_FAIL)
                vcpu->arch.pc = curr_pc;

        return er;
}

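/*
 * MMIO flow note: EMULATE_DO_MMIO from the store/load emulators means the
 * access could not be completed inside the kernel; the populated kvm_run
 * mmio fields are handed to userspace, which performs the device access
 * and (for loads) supplies the data on the next KVM_RUN.
 */
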
enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
                                            struct kvm_run *run,
                                            struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DO_MMIO;
        int32_t op, base, rt, offset;
        uint32_t bytes;

        rt = (inst >> 16) & 0x1f;
        base = (inst >> 21) & 0x1f;
        offset = inst & 0xffff;
        op = (inst >> 26) & 0x3f;

        vcpu->arch.pending_load_cause = cause;
        vcpu->arch.io_gpr = rt;

        switch (op) {
        case lw_op:
                bytes = 4;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                        kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                       host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 0;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 0;
                break;

        case lh_op:
        case lhu_op:
                bytes = 2;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                        kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                       host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 0;
                vcpu->mmio_needed = 1;
                vcpu->mmio_is_write = 0;

                if (op == lh_op)
                        vcpu->mmio_needed = 2;
                else
                        vcpu->mmio_needed = 1;

                break;

        case lbu_op:
        case lb_op:
                bytes = 1;
                if (bytes > sizeof(run->mmio.data)) {
                        kvm_err("%s: bad MMIO length: %d\n", __func__,
                                run->mmio.len);
                        er = EMULATE_FAIL;
                        break;
                }
                run->mmio.phys_addr =
                        kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
                                                       host_cp0_badvaddr);
                if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
                        er = EMULATE_FAIL;
                        break;
                }

                run->mmio.len = bytes;
                run->mmio.is_write = 0;
                vcpu->mmio_is_write = 0;

                if (op == lb_op)
                        vcpu->mmio_needed = 2;
                else
                        vcpu->mmio_needed = 1;

                break;

        default:
                kvm_err("Load not yet supported\n");
                er = EMULATE_FAIL;
                break;
        }

        return er;
}

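/*
 * Note on vcpu->mmio_needed above: the value 2 is used for lb/lh so the
 * MMIO completion path can tell sign-extending loads apart from the
 * zero-extending lbu/lhu (which use 1) when the data comes back from
 * userspace.
 */
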
int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
{
        unsigned long offset = (va & ~PAGE_MASK);
        struct kvm *kvm = vcpu->kvm;
        unsigned long pa;
        gfn_t gfn;
        pfn_t pfn;

        gfn = va >> PAGE_SHIFT;

        if (gfn >= kvm->arch.guest_pmap_npages) {
                kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
                kvm_mips_dump_host_tlbs();
                kvm_arch_vcpu_dump_regs(vcpu);
                return -1;
        }
        pfn = kvm->arch.guest_pmap[gfn];
        pa = (pfn << PAGE_SHIFT) | offset;

        kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
                  CKSEG0ADDR(pa));

        local_flush_icache_range(CKSEG0ADDR(pa), 32);
        return 0;
}

#define MIPS_CACHE_OP_INDEX_INV         0x0
#define MIPS_CACHE_OP_INDEX_LD_TAG      0x1
#define MIPS_CACHE_OP_INDEX_ST_TAG      0x2
#define MIPS_CACHE_OP_IMP               0x3
#define MIPS_CACHE_OP_HIT_INV           0x4
#define MIPS_CACHE_OP_FILL_WB_INV       0x5
#define MIPS_CACHE_OP_HIT_HB            0x6
#define MIPS_CACHE_OP_FETCH_LOCK        0x7

#define MIPS_CACHE_ICACHE               0x0
#define MIPS_CACHE_DCACHE               0x1
#define MIPS_CACHE_SEC                  0x3

enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
                                             uint32_t cause,
                                             struct kvm_run *run,
                                             struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        enum emulation_result er = EMULATE_DONE;
        int32_t offset, cache, op_inst, op, base;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        unsigned long va;
        unsigned long curr_pc;

        /*
         * Update PC and hold onto current PC in case there is
         * an error and we want to rollback the PC
         */
        curr_pc = vcpu->arch.pc;
        er = update_pc(vcpu, cause);
        if (er == EMULATE_FAIL)
                return er;

        base = (inst >> 21) & 0x1f;
        op_inst = (inst >> 16) & 0x1f;
        offset = inst & 0xffff;
        cache = (inst >> 16) & 0x3;
        op = (inst >> 18) & 0x7;

        va = arch->gprs[base] + offset;

        kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                  cache, op, base, arch->gprs[base], offset);

        /*
         * Treat INDEX_INV as a nop, basically issued by Linux on startup to
         * invalidate the caches entirely by stepping through all the
         * ways/indexes
         */
        if (op == MIPS_CACHE_OP_INDEX_INV) {
                kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                          vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
                          arch->gprs[base], offset);

                if (cache == MIPS_CACHE_DCACHE)
                        r4k_blast_dcache();
                else if (cache == MIPS_CACHE_ICACHE)
                        r4k_blast_icache();
                else {
                        kvm_err("%s: unsupported CACHE INDEX operation\n",
                                __func__);
                        return EMULATE_FAIL;
                }

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                kvm_mips_trans_cache_index(inst, opc, vcpu);
#endif
                goto done;
        }

        preempt_disable();
        if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
                if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
                        kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
        } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
                   KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
                int index;

                /* If an entry already exists then skip */
                if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
                        goto skip_fault;

                /*
                 * If address not in the guest TLB, then give the guest a
                 * fault, the resulting handler will do the right thing
                 */
                index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
                                                  (kvm_read_c0_guest_entryhi
                                                   (cop0) & ASID_MASK));

                if (index < 0) {
                        vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
                        vcpu->arch.host_cp0_badvaddr = va;
                        er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
                                                         vcpu);
                        preempt_enable();
                        goto dont_update_pc;
                } else {
                        struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
                        /*
                         * Check if the entry is valid, if not then setup a TLB
                         * invalid exception to the guest
                         */
                        if (!TLB_IS_VALID(*tlb, va)) {
                                er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
                                                                run, vcpu);
                                preempt_enable();
                                goto dont_update_pc;
                        } else {
                                /*
                                 * We fault an entry from the guest tlb to the
                                 * shadow host TLB
                                 */
                                kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
                                                                     NULL,
                                                                     NULL);
                        }
                }
        } else {
                kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                        cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
                preempt_enable();
                goto dont_update_pc;

        }

skip_fault:
        /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
        if (cache == MIPS_CACHE_DCACHE
            && (op == MIPS_CACHE_OP_FILL_WB_INV
                || op == MIPS_CACHE_OP_HIT_INV)) {
                flush_dcache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                /*
                 * Replace the CACHE instruction, with a SYNCI, not the same,
                 * but avoids a trap
                 */
                kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
        } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
                flush_dcache_line(va);
                flush_icache_line(va);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
                /* Replace the CACHE instruction, with a SYNCI */
                kvm_mips_trans_cache_va(inst, opc, vcpu);
#endif
        } else {
                kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
                        cache, op, base, arch->gprs[base], offset);
                er = EMULATE_FAIL;
                preempt_enable();
                goto dont_update_pc;
        }

        preempt_enable();

dont_update_pc:
        /* Rollback PC */
        vcpu->arch.pc = curr_pc;
done:
        return er;
}

enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
                                            struct kvm_run *run,
                                            struct kvm_vcpu *vcpu)
{
        enum emulation_result er = EMULATE_DONE;
        uint32_t inst;

        /* Fetch the instruction. */
        if (cause & CAUSEF_BD)
                opc += 1;

        inst = kvm_get_inst(opc, vcpu);

        switch (((union mips_instruction)inst).r_format.opcode) {
        case cop0_op:
                er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
                break;
        case sb_op:
        case sh_op:
        case sw_op:
                er = kvm_mips_emulate_store(inst, cause, run, vcpu);
                break;
        case lb_op:
        case lbu_op:
        case lhu_op:
        case lh_op:
        case lw_op:
                er = kvm_mips_emulate_load(inst, cause, run, vcpu);
                break;

        case cache_op:
                ++vcpu->stat.cache_exits;
                trace_kvm_exit(vcpu, CACHE_EXITS);
                er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
                break;

        default:
                kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
                        inst);
                kvm_arch_vcpu_dump_regs(vcpu);
                er = EMULATE_FAIL;
                break;
        }

        return er;
}

enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
                                               uint32_t *opc,
                                               struct kvm_run *run,
                                               struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);

                kvm_change_c0_guest_cause(cop0, (0xff),
                                          (T_SYSCALL << CAUSEB_EXCCODE));

                /* Set PC to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x180;

        } else {
                kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
                er = EMULATE_FAIL;
        }

        return er;
}

enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
                                                  uint32_t *opc,
                                                  struct kvm_run *run,
                                                  struct kvm_vcpu *vcpu)
{
        struct mips_coproc *cop0 = vcpu->arch.cop0;
        struct kvm_vcpu_arch *arch = &vcpu->arch;
        enum emulation_result er = EMULATE_DONE;
        unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
                                (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);

        if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
                /* save old pc */
                kvm_write_c0_guest_epc(cop0, arch->pc);
                kvm_set_c0_guest_status(cop0, ST0_EXL);

                if (cause & CAUSEF_BD)
                        kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
                else
                        kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);

                kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
                          arch->pc);

                /* set pc to the exception entry point */
                arch->pc = KVM_GUEST_KSEG0 + 0x0;

        } else {
                kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
                          arch->pc);

                arch->pc = KVM_GUEST_KSEG0 + 0x180;
        }

        kvm_change_c0_guest_cause(cop0, (0xff),
                                  (T_TLB_LD_MISS << CAUSEB_EXCCODE));

        /* setup badvaddr, context and entryhi registers for the guest */
        kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
        /* XXXKYMA: is the context register used by linux??? */
        kvm_write_c0_guest_entryhi(cop0, entryhi);
        /* Blow away the shadow host TLBs */
        kvm_mips_flush_host_tlb(1);

        return er;
}

Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001681enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
1682 uint32_t *opc,
1683 struct kvm_run *run,
1684 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001685{
1686 struct mips_coproc *cop0 = vcpu->arch.cop0;
1687 struct kvm_vcpu_arch *arch = &vcpu->arch;
1688 enum emulation_result er = EMULATE_DONE;
1689 unsigned long entryhi =
1690 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001691 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001692
1693 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1694 /* save old pc */
1695 kvm_write_c0_guest_epc(cop0, arch->pc);
1696 kvm_set_c0_guest_status(cop0, ST0_EXL);
1697
1698 if (cause & CAUSEF_BD)
1699 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1700 else
1701 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1702
1703 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1704 arch->pc);
1705
1706 /* set pc to the exception entry point */
1707 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1708
1709 } else {
1710		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
1711 arch->pc);
1712 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1713 }
1714
1715 kvm_change_c0_guest_cause(cop0, (0xff),
1716 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1717
1718 /* setup badvaddr, context and entryhi registers for the guest */
1719 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1720 /* XXXKYMA: is the context register used by linux??? */
1721 kvm_write_c0_guest_entryhi(cop0, entryhi);
1722 /* Blow away the shadow host TLBs */
1723 kvm_mips_flush_host_tlb(1);
1724
1725 return er;
1726}
1727
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001728enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
1729 uint32_t *opc,
1730 struct kvm_run *run,
1731 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001732{
1733 struct mips_coproc *cop0 = vcpu->arch.cop0;
1734 struct kvm_vcpu_arch *arch = &vcpu->arch;
1735 enum emulation_result er = EMULATE_DONE;
1736 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001737 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001738
1739 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1740 /* save old pc */
1741 kvm_write_c0_guest_epc(cop0, arch->pc);
1742 kvm_set_c0_guest_status(cop0, ST0_EXL);
1743
1744 if (cause & CAUSEF_BD)
1745 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1746 else
1747 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1748
1749 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1750 arch->pc);
1751
1752 /* Set PC to the exception entry point */
1753 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1754 } else {
1755 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1756 arch->pc);
1757 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1758 }
1759
1760 kvm_change_c0_guest_cause(cop0, (0xff),
1761 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1762
1763 /* setup badvaddr, context and entryhi registers for the guest */
1764 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1765 /* XXXKYMA: is the context register used by linux??? */
1766 kvm_write_c0_guest_entryhi(cop0, entryhi);
1767 /* Blow away the shadow host TLBs */
1768 kvm_mips_flush_host_tlb(1);
1769
1770 return er;
1771}
1772
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001773enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
1774 uint32_t *opc,
1775 struct kvm_run *run,
1776 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001777{
1778 struct mips_coproc *cop0 = vcpu->arch.cop0;
1779 struct kvm_vcpu_arch *arch = &vcpu->arch;
1780 enum emulation_result er = EMULATE_DONE;
1781 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001782 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001783
1784 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1785 /* save old pc */
1786 kvm_write_c0_guest_epc(cop0, arch->pc);
1787 kvm_set_c0_guest_status(cop0, ST0_EXL);
1788
1789 if (cause & CAUSEF_BD)
1790 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1791 else
1792 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1793
1794		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
1795 arch->pc);
1796
1797 /* Set PC to the exception entry point */
1798 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1799 } else {
1800		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
1801 arch->pc);
1802 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1803 }
1804
1805 kvm_change_c0_guest_cause(cop0, (0xff),
1806 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1807
1808 /* setup badvaddr, context and entryhi registers for the guest */
1809 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1810 /* XXXKYMA: is the context register used by linux??? */
1811 kvm_write_c0_guest_entryhi(cop0, entryhi);
1812 /* Blow away the shadow host TLBs */
1813 kvm_mips_flush_host_tlb(1);
1814
1815 return er;
1816}
1817
1818/* TLBMOD: store into address matching TLB with Dirty bit off */
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001819enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1820 struct kvm_run *run,
1821 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001822{
1823 enum emulation_result er = EMULATE_DONE;
Sanjay Lale685c682012-11-21 18:34:04 -08001824#ifdef DEBUG
James Hogan3d654832014-05-29 10:16:41 +01001825 struct mips_coproc *cop0 = vcpu->arch.cop0;
1826 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1827 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1828 int index;
1829
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001830 /* If address not in the guest TLB, then we are in trouble */
Sanjay Lale685c682012-11-21 18:34:04 -08001831 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1832 if (index < 0) {
1833 /* XXXKYMA Invalidate and retry */
1834 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1835 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1836 __func__, entryhi);
1837 kvm_mips_dump_guest_tlbs(vcpu);
1838 kvm_mips_dump_host_tlbs();
1839 return EMULATE_FAIL;
1840 }
1841#endif
1842
1843 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1844 return er;
1845}
1846
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001847enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
1848 uint32_t *opc,
1849 struct kvm_run *run,
1850 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001851{
1852 struct mips_coproc *cop0 = vcpu->arch.cop0;
1853 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001854 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001855 struct kvm_vcpu_arch *arch = &vcpu->arch;
1856 enum emulation_result er = EMULATE_DONE;
1857
1858 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1859 /* save old pc */
1860 kvm_write_c0_guest_epc(cop0, arch->pc);
1861 kvm_set_c0_guest_status(cop0, ST0_EXL);
1862
1863 if (cause & CAUSEF_BD)
1864 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1865 else
1866 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1867
1868 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1869 arch->pc);
1870
1871 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1872 } else {
1873 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1874 arch->pc);
1875 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1876 }
1877
1878 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
1879
1880 /* setup badvaddr, context and entryhi registers for the guest */
1881 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1882 /* XXXKYMA: is the context register used by linux??? */
1883 kvm_write_c0_guest_entryhi(cop0, entryhi);
1884 /* Blow away the shadow host TLBs */
1885 kvm_mips_flush_host_tlb(1);
1886
1887 return er;
1888}
1889
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001890enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
1891 uint32_t *opc,
1892 struct kvm_run *run,
1893 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001894{
1895 struct mips_coproc *cop0 = vcpu->arch.cop0;
1896 struct kvm_vcpu_arch *arch = &vcpu->arch;
1897 enum emulation_result er = EMULATE_DONE;
1898
1899 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1900 /* save old pc */
1901 kvm_write_c0_guest_epc(cop0, arch->pc);
1902 kvm_set_c0_guest_status(cop0, ST0_EXL);
1903
1904 if (cause & CAUSEF_BD)
1905 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1906 else
1907 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1908
1909 }
1910
1911 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1912
1913 kvm_change_c0_guest_cause(cop0, (0xff),
1914 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
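	/* Flag coprocessor 1 (the FPU) as the faulting unit in Cause.CE */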
1915 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
1916
1917 return er;
1918}
1919
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001920enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
1921 uint32_t *opc,
1922 struct kvm_run *run,
1923 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001924{
1925 struct mips_coproc *cop0 = vcpu->arch.cop0;
1926 struct kvm_vcpu_arch *arch = &vcpu->arch;
1927 enum emulation_result er = EMULATE_DONE;
1928
1929 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1930 /* save old pc */
1931 kvm_write_c0_guest_epc(cop0, arch->pc);
1932 kvm_set_c0_guest_status(cop0, ST0_EXL);
1933
1934 if (cause & CAUSEF_BD)
1935 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1936 else
1937 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1938
1939 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
1940
1941 kvm_change_c0_guest_cause(cop0, (0xff),
1942 (T_RES_INST << CAUSEB_EXCCODE));
1943
1944 /* Set PC to the exception entry point */
1945 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1946
1947 } else {
1948 kvm_err("Trying to deliver RI when EXL is already set\n");
1949 er = EMULATE_FAIL;
1950 }
1951
1952 return er;
1953}
1954
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001955enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
1956 uint32_t *opc,
1957 struct kvm_run *run,
1958 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001959{
1960 struct mips_coproc *cop0 = vcpu->arch.cop0;
1961 struct kvm_vcpu_arch *arch = &vcpu->arch;
1962 enum emulation_result er = EMULATE_DONE;
1963
1964 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1965 /* save old pc */
1966 kvm_write_c0_guest_epc(cop0, arch->pc);
1967 kvm_set_c0_guest_status(cop0, ST0_EXL);
1968
1969 if (cause & CAUSEF_BD)
1970 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1971 else
1972 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1973
1974 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
1975
1976 kvm_change_c0_guest_cause(cop0, (0xff),
1977 (T_BREAK << CAUSEB_EXCCODE));
1978
1979 /* Set PC to the exception entry point */
1980 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1981
1982 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07001983 kvm_err("Trying to deliver BP when EXL is already set\n");
Sanjay Lale685c682012-11-21 18:34:04 -08001984 er = EMULATE_FAIL;
1985 }
1986
1987 return er;
1988}
1989
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001990/* ll/sc, rdhwr, sync emulation */
Sanjay Lale685c682012-11-21 18:34:04 -08001991
1992#define OPCODE 0xfc000000
1993#define BASE 0x03e00000
1994#define RT 0x001f0000
1995#define OFFSET 0x0000ffff
1996#define LL 0xc0000000
1997#define SC 0xe0000000
1998#define SPEC0 0x00000000
1999#define SPEC3 0x7c000000
2000#define RD 0x0000f800
2001#define FUNC 0x0000003f
2002#define SYNC 0x0000000f
2003#define RDHWR 0x0000003b
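
/*
 * Example decode, assuming the common TLS idiom "rdhwr $3, $29"
 * (word 0x7c03e83b): (inst & OPCODE) == SPEC3, (inst & FUNC) == RDHWR,
 * rd = (inst & RD) >> 11 = 29 (UserLocal), rt = (inst & RT) >> 16 = 3.
 */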
2004
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002005enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
2006 struct kvm_run *run,
2007 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002008{
2009 struct mips_coproc *cop0 = vcpu->arch.cop0;
2010 struct kvm_vcpu_arch *arch = &vcpu->arch;
2011 enum emulation_result er = EMULATE_DONE;
2012 unsigned long curr_pc;
2013 uint32_t inst;
2014
2015 /*
2016 * Update PC and hold onto current PC in case there is
2017 * an error and we want to rollback the PC
2018 */
2019 curr_pc = vcpu->arch.pc;
2020 er = update_pc(vcpu, cause);
2021 if (er == EMULATE_FAIL)
2022 return er;
2023
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002024 /* Fetch the instruction. */
Sanjay Lale685c682012-11-21 18:34:04 -08002025 if (cause & CAUSEF_BD)
2026 opc += 1;
2027
2028 inst = kvm_get_inst(opc, vcpu);
2029
2030 if (inst == KVM_INVALID_INST) {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002031 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
Sanjay Lale685c682012-11-21 18:34:04 -08002032 return EMULATE_FAIL;
2033 }
2034
2035 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
James Hogan26f4f3b2014-03-14 13:06:09 +00002036 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
Sanjay Lale685c682012-11-21 18:34:04 -08002037 int rd = (inst & RD) >> 11;
2038 int rt = (inst & RT) >> 16;
James Hogan26f4f3b2014-03-14 13:06:09 +00002039 /* If usermode, check RDHWR rd is allowed by guest HWREna */
2040 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2041 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2042 rd, opc);
2043 goto emulate_ri;
2044 }
Sanjay Lale685c682012-11-21 18:34:04 -08002045 switch (rd) {
2046 case 0: /* CPU number */
2047 arch->gprs[rt] = 0;
2048 break;
2049 case 1: /* SYNCI length */
2050 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2051 current_cpu_data.icache.linesz);
2052 break;
2053 case 2: /* Read count register */
James Hogane30492b2014-05-29 10:16:35 +01002054 arch->gprs[rt] = kvm_mips_read_count(vcpu);
Sanjay Lale685c682012-11-21 18:34:04 -08002055 break;
2056 case 3: /* Count register resolution */
2057 switch (current_cpu_data.cputype) {
2058 case CPU_20KC:
2059 case CPU_25KF:
2060 arch->gprs[rt] = 1;
2061 break;
2062 default:
2063 arch->gprs[rt] = 2;
2064 }
2065 break;
2066 case 29:
Sanjay Lale685c682012-11-21 18:34:04 -08002067 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
Sanjay Lale685c682012-11-21 18:34:04 -08002068 break;
2069
2070 default:
James Hogan15505672014-03-14 13:06:07 +00002071 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
James Hogan26f4f3b2014-03-14 13:06:09 +00002072 goto emulate_ri;
Sanjay Lale685c682012-11-21 18:34:04 -08002073 }
2074 } else {
James Hogan15505672014-03-14 13:06:07 +00002075 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
James Hogan26f4f3b2014-03-14 13:06:09 +00002076 goto emulate_ri;
Sanjay Lale685c682012-11-21 18:34:04 -08002077 }
2078
James Hogan26f4f3b2014-03-14 13:06:09 +00002079 return EMULATE_DONE;
2080
2081emulate_ri:
Sanjay Lale685c682012-11-21 18:34:04 -08002082 /*
James Hogan26f4f3b2014-03-14 13:06:09 +00002083 * Rollback PC (if in branch delay slot then the PC already points to
2084 * branch target), and pass the RI exception to the guest OS.
Sanjay Lale685c682012-11-21 18:34:04 -08002085 */
James Hogan26f4f3b2014-03-14 13:06:09 +00002086 vcpu->arch.pc = curr_pc;
2087 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
Sanjay Lale685c682012-11-21 18:34:04 -08002088}
2089
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002090enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2091 struct kvm_run *run)
Sanjay Lale685c682012-11-21 18:34:04 -08002092{
2093 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2094 enum emulation_result er = EMULATE_DONE;
2095 unsigned long curr_pc;
2096
2097 if (run->mmio.len > sizeof(*gpr)) {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002098		kvm_err("Bad MMIO length: %d\n", run->mmio.len);
Sanjay Lale685c682012-11-21 18:34:04 -08002099 er = EMULATE_FAIL;
2100 goto done;
2101 }
2102
2103 /*
2104 * Update PC and hold onto current PC in case there is
2105 * an error and we want to rollback the PC
2106 */
2107 curr_pc = vcpu->arch.pc;
2108 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
2109 if (er == EMULATE_FAIL)
2110 return er;
2111
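	/*
	 * mmio_needed == 2 requests a sign-extending load (set for lb/lh in
	 * kvm_mips_emulate_load); other values zero-extend, and 32-bit loads
	 * are copied as-is.
	 */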
2112 switch (run->mmio.len) {
2113 case 4:
2114 *gpr = *(int32_t *) run->mmio.data;
2115 break;
2116
2117 case 2:
2118 if (vcpu->mmio_needed == 2)
2119 *gpr = *(int16_t *) run->mmio.data;
2120 else
2121			*gpr = *(u16 *) run->mmio.data;
2122
2123 break;
2124 case 1:
2125 if (vcpu->mmio_needed == 2)
2126 *gpr = *(int8_t *) run->mmio.data;
2127 else
2128 *gpr = *(u8 *) run->mmio.data;
2129 break;
2130 }
2131
2132 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002133 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2134 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2135 vcpu->mmio_needed);
Sanjay Lale685c682012-11-21 18:34:04 -08002136
2137done:
2138 return er;
2139}
2140
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002141static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
2142 uint32_t *opc,
2143 struct kvm_run *run,
2144 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002145{
2146 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2147 struct mips_coproc *cop0 = vcpu->arch.cop0;
2148 struct kvm_vcpu_arch *arch = &vcpu->arch;
2149 enum emulation_result er = EMULATE_DONE;
2150
2151 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2152 /* save old pc */
2153 kvm_write_c0_guest_epc(cop0, arch->pc);
2154 kvm_set_c0_guest_status(cop0, ST0_EXL);
2155
2156 if (cause & CAUSEF_BD)
2157 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2158 else
2159 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2160
2161 kvm_change_c0_guest_cause(cop0, (0xff),
2162 (exccode << CAUSEB_EXCCODE));
2163
2164 /* Set PC to the exception entry point */
2165 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2166 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2167
2168 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2169 exccode, kvm_read_c0_guest_epc(cop0),
2170 kvm_read_c0_guest_badvaddr(cop0));
2171 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002172 kvm_err("Trying to deliver EXC when EXL is already set\n");
Sanjay Lale685c682012-11-21 18:34:04 -08002173 er = EMULATE_FAIL;
2174 }
2175
2176 return er;
2177}
2178
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002179enum emulation_result kvm_mips_check_privilege(unsigned long cause,
2180 uint32_t *opc,
2181 struct kvm_run *run,
2182 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002183{
2184 enum emulation_result er = EMULATE_DONE;
2185 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2186 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2187
2188 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2189
2190 if (usermode) {
2191 switch (exccode) {
2192 case T_INT:
2193 case T_SYSCALL:
2194 case T_BREAK:
2195 case T_RES_INST:
2196 break;
2197
2198 case T_COP_UNUSABLE:
2199 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2200 er = EMULATE_PRIV_FAIL;
2201 break;
2202
2203 case T_TLB_MOD:
2204 break;
2205
2206 case T_TLB_LD_MISS:
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002207 /*
2208			 * If we are accessing Guest kernel space, then send an
2209			 * address error exception to the guest
2210 */
Sanjay Lale685c682012-11-21 18:34:04 -08002211 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002212 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2213 badvaddr);
Sanjay Lale685c682012-11-21 18:34:04 -08002214 cause &= ~0xff;
2215 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
2216 er = EMULATE_PRIV_FAIL;
2217 }
2218 break;
2219
2220 case T_TLB_ST_MISS:
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002221 /*
2222			 * If we are accessing Guest kernel space, then send an
2223			 * address error exception to the guest
2224 */
Sanjay Lale685c682012-11-21 18:34:04 -08002225 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002226 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2227 badvaddr);
Sanjay Lale685c682012-11-21 18:34:04 -08002228 cause &= ~0xff;
2229 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
2230 er = EMULATE_PRIV_FAIL;
2231 }
2232 break;
2233
2234 case T_ADDR_ERR_ST:
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002235 kvm_debug("%s: address error ST @ %#lx\n", __func__,
2236 badvaddr);
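			/*
			 * Faults on the commpage are reclassified as TLB store
			 * misses so the guest sees a TLB exception rather than
			 * an address error.
			 */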
Sanjay Lale685c682012-11-21 18:34:04 -08002237 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2238 cause &= ~0xff;
2239 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
2240 }
2241 er = EMULATE_PRIV_FAIL;
2242 break;
2243 case T_ADDR_ERR_LD:
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002244 kvm_debug("%s: address error LD @ %#lx\n", __func__,
2245 badvaddr);
Sanjay Lale685c682012-11-21 18:34:04 -08002246 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2247 cause &= ~0xff;
2248 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
2249 }
2250 er = EMULATE_PRIV_FAIL;
2251 break;
2252 default:
2253 er = EMULATE_PRIV_FAIL;
2254 break;
2255 }
2256 }
2257
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002258 if (er == EMULATE_PRIV_FAIL)
Sanjay Lale685c682012-11-21 18:34:04 -08002259 kvm_mips_emulate_exc(cause, opc, run, vcpu);
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002260
Sanjay Lale685c682012-11-21 18:34:04 -08002261 return er;
2262}
2263
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002264/*
2265 * User Address (UA) fault; this can happen if:
Sanjay Lale685c682012-11-21 18:34:04 -08002266 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
2267 * case we pass on the fault to the guest kernel and let it handle it.
2268 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
2269 * case we inject the TLB from the Guest TLB into the shadow host TLB
2270 */
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002271enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
2272 uint32_t *opc,
2273 struct kvm_run *run,
2274 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002275{
2276 enum emulation_result er = EMULATE_DONE;
2277 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2278 unsigned long va = vcpu->arch.host_cp0_badvaddr;
2279 int index;
2280
2281 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
2282 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
2283
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002284 /*
2285 * KVM would not have got the exception if this entry was valid in the
2286 * shadow host TLB. Check the Guest TLB, if the entry is not there then
2287 * send the guest an exception. The guest exc handler should then inject
2288 * an entry into the guest TLB.
Sanjay Lale685c682012-11-21 18:34:04 -08002289 */
2290 index = kvm_mips_guest_tlb_lookup(vcpu,
2291 (va & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07002292 (kvm_read_c0_guest_entryhi
2293 (vcpu->arch.cop0) & ASID_MASK));
Sanjay Lale685c682012-11-21 18:34:04 -08002294 if (index < 0) {
2295 if (exccode == T_TLB_LD_MISS) {
2296 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2297 } else if (exccode == T_TLB_ST_MISS) {
2298 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2299 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002300 kvm_err("%s: invalid exc code: %d\n", __func__,
2301 exccode);
Sanjay Lale685c682012-11-21 18:34:04 -08002302 er = EMULATE_FAIL;
2303 }
2304 } else {
2305 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2306
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002307 /*
2308 * Check if the entry is valid, if not then setup a TLB invalid
2309 * exception to the guest
2310 */
Sanjay Lale685c682012-11-21 18:34:04 -08002311 if (!TLB_IS_VALID(*tlb, va)) {
2312 if (exccode == T_TLB_LD_MISS) {
2313 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2314 vcpu);
2315 } else if (exccode == T_TLB_ST_MISS) {
2316 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2317 vcpu);
2318 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002319 kvm_err("%s: invalid exc code: %d\n", __func__,
2320 exccode);
Sanjay Lale685c682012-11-21 18:34:04 -08002321 er = EMULATE_FAIL;
2322 }
2323 } else {
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002324 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2325 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
2326 /*
2327 * OK we have a Guest TLB entry, now inject it into the
2328 * shadow host TLB
2329 */
Sanjay Lale685c682012-11-21 18:34:04 -08002330 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
2331 NULL);
2332 }
2333 }
2334
2335 return er;
2336}