/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Instruction/Exception emulation
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/ktime.h>
#include <linux/kvm_host.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>
#include <linux/bootmem.h>
#include <linux/random.h>
#include <asm/page.h>
#include <asm/cacheflush.h>
#include <asm/cpu-info.h>
#include <asm/mmu_context.h>
#include <asm/tlbflush.h>
#include <asm/inst.h>

#undef CONFIG_MIPS_MT
#include <asm/r4kcache.h>
#define CONFIG_MIPS_MT

#include "opcode.h"
#include "interrupt.h"
#include "commpage.h"

#include "trace.h"

/*
 * Compute the return address and emulate the branch, if required.
 * This function should be called only when the instruction being emulated
 * sits in a branch delay slot.
 */
unsigned long kvm_compute_return_epc(struct kvm_vcpu *vcpu,
				     unsigned long instpc)
{
	unsigned int dspcontrol;
	union mips_instruction insn;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	long epc = instpc;
	long nextpc = KVM_INVALID_INST;

	if (epc & 3)
		goto unaligned;

	/* Read the instruction */
	insn.word = kvm_get_inst((uint32_t *) epc, vcpu);

	if (insn.word == KVM_INVALID_INST)
		return KVM_INVALID_INST;

	switch (insn.i_format.opcode) {
		/* jr and jalr are in r_format. */
	case spec_op:
		switch (insn.r_format.func) {
		case jalr_op:
			arch->gprs[insn.r_format.rd] = epc + 8;
			/* Fall through */
		case jr_op:
			nextpc = arch->gprs[insn.r_format.rs];
			break;
		}
		break;

		/*
		 * This group contains:
		 * bltz_op, bgez_op, bltzl_op, bgezl_op,
		 * bltzal_op, bgezal_op, bltzall_op, bgezall_op.
		 */
	case bcond_op:
		switch (insn.i_format.rt) {
		case bltz_op:
		case bltzl_op:
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgez_op:
		case bgezl_op:
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bltzal_op:
		case bltzall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] < 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;

		case bgezal_op:
		case bgezall_op:
			arch->gprs[31] = epc + 8;
			if ((long)arch->gprs[insn.i_format.rs] >= 0)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		case bposge32_op:
			if (!cpu_has_dsp)
				goto sigill;

			dspcontrol = rddsp(0x01);

			if (dspcontrol >= 32)
				epc = epc + 4 + (insn.i_format.simmediate << 2);
			else
				epc += 8;
			nextpc = epc;
			break;
		}
		break;

		/* These are unconditional and in j_format. */
	case jal_op:
		arch->gprs[31] = instpc + 8;
	case j_op:
		epc += 4;
		epc >>= 28;
		epc <<= 28;
		epc |= (insn.j_format.target << 2);
		nextpc = epc;
		break;

		/* These are conditional and in i_format. */
	case beq_op:
	case beql_op:
		if (arch->gprs[insn.i_format.rs] ==
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bne_op:
	case bnel_op:
		if (arch->gprs[insn.i_format.rs] !=
		    arch->gprs[insn.i_format.rt])
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case blez_op:		/* not really i_format */
	case blezl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] <= 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

	case bgtz_op:
	case bgtzl_op:
		/* rt field assumed to be zero */
		if ((long)arch->gprs[insn.i_format.rs] > 0)
			epc = epc + 4 + (insn.i_format.simmediate << 2);
		else
			epc += 8;
		nextpc = epc;
		break;

		/* And now the FPA/cp1 branch instructions. */
	case cop1_op:
		kvm_err("%s: unsupported cop1_op\n", __func__);
		break;
	}

	return nextpc;

unaligned:
	kvm_err("%s: unaligned epc\n", __func__);
	return nextpc;

sigill:
	kvm_err("%s: DSP branch but not DSP ASE\n", __func__);
	return nextpc;
}
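
/*
 * Worked example (illustrative, not from the original source): for a taken
 * conditional branch such as `beq $a0, $a1, +5` at epc, the target computed
 * above is
 *
 *   nextpc = epc + 4 + (simm16 << 2) = epc + 4 + 20
 *
 * i.e. the 16-bit immediate counts instruction words forward from the delay
 * slot. A not-taken branch instead resumes at epc + 8, skipping past the
 * branch and its delay slot.
 */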

enum emulation_result update_pc(struct kvm_vcpu *vcpu, uint32_t cause)
{
	unsigned long branch_pc;
	enum emulation_result er = EMULATE_DONE;

	if (cause & CAUSEF_BD) {
		branch_pc = kvm_compute_return_epc(vcpu, vcpu->arch.pc);
		if (branch_pc == KVM_INVALID_INST) {
			er = EMULATE_FAIL;
		} else {
			vcpu->arch.pc = branch_pc;
			kvm_debug("BD update_pc(): New PC: %#lx\n",
				  vcpu->arch.pc);
		}
	} else
		vcpu->arch.pc += 4;

	kvm_debug("update_pc(): New PC: %#lx\n", vcpu->arch.pc);

	return er;
}

/**
 * kvm_mips_count_disabled() - Find whether the CP0_Count timer is disabled.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	1 if the CP0_Count timer is disabled by either the guest
 *		CP0_Cause.DC bit or the count_ctl.DC bit.
 *		0 otherwise (in which case CP0_Count timer is running).
 */
static inline int kvm_mips_count_disabled(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	return (vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) ||
		(kvm_read_c0_guest_cause(cop0) & CAUSEF_DC);
}

/**
 * kvm_mips_ktime_to_count() - Scale ktime_t to a 32-bit count.
 *
 * Caches the dynamic nanosecond bias in vcpu->arch.count_dyn_bias.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static uint32_t kvm_mips_ktime_to_count(struct kvm_vcpu *vcpu, ktime_t now)
{
	s64 now_ns, periods;
	u64 delta;

	now_ns = ktime_to_ns(now);
	delta = now_ns + vcpu->arch.count_dyn_bias;

	if (delta >= vcpu->arch.count_period) {
		/* If delta is out of safe range the bias needs adjusting */
		periods = div64_s64(now_ns, vcpu->arch.count_period);
		vcpu->arch.count_dyn_bias = -periods * vcpu->arch.count_period;
		/* Recalculate delta with new bias */
		delta = now_ns + vcpu->arch.count_dyn_bias;
	}

	/*
	 * We've ensured that:
	 *   delta < count_period
	 *
	 * Therefore the intermediate delta * count_hz will never overflow
	 * since at the boundary condition:
	 *   delta = count_period
	 *   delta = NSEC_PER_SEC * 2^32 / count_hz
	 *   delta * count_hz = NSEC_PER_SEC * 2^32
	 */
	return div_u64(delta * vcpu->arch.count_hz, NSEC_PER_SEC);
}
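
/*
 * Worked example (illustrative, assuming the default 100 MHz count_hz set up
 * in kvm_mips_init_count() below): count_period is then
 * 10^9 * 2^32 / 10^8 ns, roughly 42.95 seconds. With count_dyn_bias chosen so
 * that 0 <= delta < count_period, the product delta * count_hz stays below
 * NSEC_PER_SEC * 2^32 < 2^62, so the 64-bit multiply above cannot overflow,
 * and the division yields the scaled 32-bit count value.
 */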

/**
 * kvm_mips_count_time() - Get effective current time.
 * @vcpu:	Virtual CPU.
 *
 * Get effective monotonic ktime. This is usually a straightforward
 * ktime_get(), except when the master disable bit is set in count_ctl, in
 * which case it is count_resume, i.e. the time that the count was disabled.
 *
 * Returns:	Effective monotonic ktime for CP0_Count.
 */
static inline ktime_t kvm_mips_count_time(struct kvm_vcpu *vcpu)
{
	if (unlikely(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		return vcpu->arch.count_resume;

	return ktime_get();
}

/**
 * kvm_mips_read_count_running() - Read the current count value as if running.
 * @vcpu:	Virtual CPU.
 * @now:	Kernel time to read CP0_Count at.
 *
 * Returns the current guest CP0_Count register at time @now and handles if the
 * timer interrupt is pending and hasn't been handled yet.
 *
 * Returns:	The current value of the guest CP0_Count register.
 */
static uint32_t kvm_mips_read_count_running(struct kvm_vcpu *vcpu, ktime_t now)
{
	ktime_t expires;
	int running;

	/* Is the hrtimer pending? */
	expires = hrtimer_get_expires(&vcpu->arch.comparecount_timer);
	if (ktime_compare(now, expires) >= 0) {
		/*
		 * Cancel it while we handle it so there's no chance of
		 * interference with the timeout handler.
		 */
		running = hrtimer_cancel(&vcpu->arch.comparecount_timer);

		/* Nothing should be waiting on the timeout */
		kvm_mips_callbacks->queue_timer_int(vcpu);

		/*
		 * Restart the timer if it was running based on the expiry time
		 * we read, so that we don't push it back 2 periods.
		 */
		if (running) {
			expires = ktime_add_ns(expires,
					       vcpu->arch.count_period);
			hrtimer_start(&vcpu->arch.comparecount_timer, expires,
				      HRTIMER_MODE_ABS);
		}
	}

	/* Return the biased and scaled guest CP0_Count */
	return vcpu->arch.count_bias + kvm_mips_ktime_to_count(vcpu, now);
}

/**
 * kvm_mips_read_count() - Read the current count value.
 * @vcpu:	Virtual CPU.
 *
 * Read the current guest CP0_Count value, taking into account whether the
 * timer is stopped.
 *
 * Returns:	The current guest CP0_Count value.
 */
uint32_t kvm_mips_read_count(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* If count disabled just read static copy of count */
	if (kvm_mips_count_disabled(vcpu))
		return kvm_read_c0_guest_count(cop0);

	return kvm_mips_read_count_running(vcpu, ktime_get());
}

/**
 * kvm_mips_freeze_hrtimer() - Safely stop the hrtimer.
 * @vcpu:	Virtual CPU.
 * @count:	Output pointer for CP0_Count value at point of freeze.
 *
 * Freeze the hrtimer safely and return both the ktime and the CP0_Count value
 * at the point it was frozen. It is guaranteed that any pending interrupts at
 * the point it was frozen are handled, and none after that point.
 *
 * This is useful where the time/CP0_Count is needed in the calculation of the
 * new parameters.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 *
 * Returns:	The ktime at the point of freeze.
 */
static ktime_t kvm_mips_freeze_hrtimer(struct kvm_vcpu *vcpu,
				       uint32_t *count)
{
	ktime_t now;

	/* stop hrtimer before finding time */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	now = ktime_get();

	/* find count at this point and handle pending hrtimer */
	*count = kvm_mips_read_count_running(vcpu, now);

	return now;
}

/**
 * kvm_mips_resume_hrtimer() - Resume hrtimer, updating expiry.
 * @vcpu:	Virtual CPU.
 * @now:	ktime at point of resume.
 * @count:	CP0_Count at point of resume.
 *
 * Resumes the timer and updates the timer expiry based on @now and @count.
 * This can be used in conjunction with kvm_mips_freeze_hrtimer() when timer
 * parameters need to be changed.
 *
 * It is guaranteed that a timer interrupt immediately after resume will be
 * handled, but not if CP0_Compare is exactly at @count. That case is already
 * handled by kvm_mips_freeze_hrtimer().
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_resume_hrtimer(struct kvm_vcpu *vcpu,
				    ktime_t now, uint32_t count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t compare;
	u64 delta;
	ktime_t expire;

	/* Calculate timeout (wrap 0 to 2^32) */
	compare = kvm_read_c0_guest_compare(cop0);
	delta = (u64)(uint32_t)(compare - count - 1) + 1;
	delta = div_u64(delta * NSEC_PER_SEC, vcpu->arch.count_hz);
	expire = ktime_add_ns(now, delta);

	/* Update hrtimer to use new timeout */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);
	hrtimer_start(&vcpu->arch.comparecount_timer, expire, HRTIMER_MODE_ABS);
}
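
/*
 * Worked example of the wrap arithmetic above (illustrative): with
 * compare == count, (uint32_t)(compare - count - 1) + 1 evaluates to 2^32,
 * i.e. a full Count period rather than an immediate expiry, which is why the
 * compare == count case must already have been handled on the freeze path.
 * With compare == count + 1, delta is a single tick, scaled to nanoseconds
 * by NSEC_PER_SEC / count_hz.
 */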

/**
 * kvm_mips_update_hrtimer() - Update next expiry time of hrtimer.
 * @vcpu:	Virtual CPU.
 *
 * Recalculates and updates the expiry time of the hrtimer. This can be used
 * after timer parameters have been altered which do not depend on the time
 * that the change occurs (in those cases kvm_mips_freeze_hrtimer() and
 * kvm_mips_resume_hrtimer() are used directly).
 *
 * It is guaranteed that no timer interrupts will be lost in the process.
 *
 * Assumes !kvm_mips_count_disabled(@vcpu) (guest CP0_Count timer is running).
 */
static void kvm_mips_update_hrtimer(struct kvm_vcpu *vcpu)
{
	ktime_t now;
	uint32_t count;

	/*
	 * freeze_hrtimer takes care of timer interrupts <= count, and
	 * resume_hrtimer takes care of timer interrupts > count.
	 */
	now = kvm_mips_freeze_hrtimer(vcpu, &count);
	kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_write_count() - Modify the count and update timer.
 * @vcpu:	Virtual CPU.
 * @count:	Guest CP0_Count value to set.
 *
 * Sets the CP0_Count value and updates the timer accordingly.
 */
void kvm_mips_write_count(struct kvm_vcpu *vcpu, uint32_t count)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	ktime_t now;

	/* Calculate bias */
	now = kvm_mips_count_time(vcpu);
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	if (kvm_mips_count_disabled(vcpu))
		/* The timer's disabled, adjust the static count */
		kvm_write_c0_guest_count(cop0, count);
	else
		/* Update timeout */
		kvm_mips_resume_hrtimer(vcpu, now, count);
}

/**
 * kvm_mips_init_count() - Initialise timer.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the timer to a sensible frequency, namely 100MHz, zero it, and
 * set it going if it's enabled.
 */
void kvm_mips_init_count(struct kvm_vcpu *vcpu)
{
	/* 100 MHz */
	vcpu->arch.count_hz = 100*1000*1000;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32,
					  vcpu->arch.count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Starting at 0 */
	kvm_mips_write_count(vcpu, 0);
}
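
/*
 * Illustrative note (not from the original source): after
 * kvm_mips_write_count(vcpu, C) at monotonic time t, count_bias is chosen so
 * that a later read at time t' returns
 *
 *   count_bias + ktime_to_count(t')
 *     = C + (t' - t) * count_hz / NSEC_PER_SEC   (mod 2^32)
 *
 * i.e. the guest observes CP0_Count == C at time t, ticking at count_hz from
 * then on.
 */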

/**
 * kvm_mips_set_count_hz() - Update the frequency of the timer.
 * @vcpu:	Virtual CPU.
 * @count_hz:	Frequency of CP0_Count timer in Hz.
 *
 * Change the frequency of the CP0_Count timer. This is done atomically so that
 * CP0_Count is continuous and no timer interrupt is lost.
 *
 * Returns:	-EINVAL if @count_hz is out of range.
 *		0 on success.
 */
int kvm_mips_set_count_hz(struct kvm_vcpu *vcpu, s64 count_hz)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int dc;
	ktime_t now;
	u32 count;

	/* ensure the frequency is in a sensible range... */
	if (count_hz <= 0 || count_hz > NSEC_PER_SEC)
		return -EINVAL;
	/* ... and has actually changed */
	if (vcpu->arch.count_hz == count_hz)
		return 0;

	/* Safely freeze timer so we can keep it continuous */
	dc = kvm_mips_count_disabled(vcpu);
	if (dc) {
		now = kvm_mips_count_time(vcpu);
		count = kvm_read_c0_guest_count(cop0);
	} else {
		now = kvm_mips_freeze_hrtimer(vcpu, &count);
	}

	/* Update the frequency */
	vcpu->arch.count_hz = count_hz;
	vcpu->arch.count_period = div_u64((u64)NSEC_PER_SEC << 32, count_hz);
	vcpu->arch.count_dyn_bias = 0;

	/* Calculate adjusted bias so dynamic count is unchanged */
	vcpu->arch.count_bias = count - kvm_mips_ktime_to_count(vcpu, now);

	/* Update and resume hrtimer */
	if (!dc)
		kvm_mips_resume_hrtimer(vcpu, now, count);
	return 0;
}
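
/*
 * Illustrative example (hypothetical values): if the guest CP0_Count reads
 * 0x1000 when userland switches count_hz from 100 MHz to 50 MHz, the code
 * above recomputes count_bias against the new scale so that the very next
 * read still returns 0x1000; only the tick rate changes from the point of
 * the switch, and the pending timer expiry is recalculated so no interrupt
 * is lost.
 */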

/**
 * kvm_mips_write_compare() - Modify compare and update timer.
 * @vcpu:	Virtual CPU.
 * @compare:	New CP0_Compare value.
 *
 * Update CP0_Compare to a new value and update the timeout.
 */
void kvm_mips_write_compare(struct kvm_vcpu *vcpu, uint32_t compare)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	/* if unchanged, must just be an ack */
	if (kvm_read_c0_guest_compare(cop0) == compare)
		return;

	/* Update compare */
	kvm_write_c0_guest_compare(cop0, compare);

	/* Update timeout if count enabled */
	if (!kvm_mips_count_disabled(vcpu))
		kvm_mips_update_hrtimer(vcpu);
}

/**
 * kvm_mips_count_disable() - Disable count.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer. A timer interrupt on or before the final stop
 * time will be handled but not after.
 *
 * Assumes CP0_Count was previously enabled but now Guest.CP0_Cause.DC or
 * count_ctl.DC has been set (count disabled).
 *
 * Returns:	The time that the timer was stopped.
 */
static ktime_t kvm_mips_count_disable(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;
	ktime_t now;

	/* Stop hrtimer */
	hrtimer_cancel(&vcpu->arch.comparecount_timer);

	/* Set the static count from the dynamic count, handling pending TI */
	now = ktime_get();
	count = kvm_mips_read_count_running(vcpu, now);
	kvm_write_c0_guest_count(cop0, count);

	return now;
}

/**
 * kvm_mips_count_disable_cause() - Disable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Disable the CP0_Count timer and set CP0_Cause.DC. A timer interrupt on or
 * before the final stop time will be handled if the timer isn't disabled by
 * count_ctl.DC, but not after.
 *
 * Assumes CP0_Cause.DC is clear (count enabled).
 */
void kvm_mips_count_disable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	kvm_set_c0_guest_cause(cop0, CAUSEF_DC);
	if (!(vcpu->arch.count_ctl & KVM_REG_MIPS_COUNT_CTL_DC))
		kvm_mips_count_disable(vcpu);
}

/**
 * kvm_mips_count_enable_cause() - Enable count using CP0_Cause.DC.
 * @vcpu:	Virtual CPU.
 *
 * Enable the CP0_Count timer and clear CP0_Cause.DC. A timer interrupt after
 * the start time will be handled if the timer isn't disabled by count_ctl.DC,
 * potentially before even returning, so the caller should be careful with
 * ordering of CP0_Cause modifications so as not to lose it.
 *
 * Assumes CP0_Cause.DC is set (count disabled).
 */
void kvm_mips_count_enable_cause(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t count;

	kvm_clear_c0_guest_cause(cop0, CAUSEF_DC);

	/*
	 * Set the dynamic count to match the static count.
	 * This starts the hrtimer if count_ctl.DC allows it.
	 * Otherwise it conveniently updates the biases.
	 */
	count = kvm_read_c0_guest_count(cop0);
	kvm_mips_write_count(vcpu, count);
}

/**
 * kvm_mips_set_count_ctl() - Update the count control KVM register.
 * @vcpu:	Virtual CPU.
 * @count_ctl:	Count control register new value.
 *
 * Set the count control KVM register. The timer is updated accordingly.
 *
 * Returns:	-EINVAL if reserved bits are set.
 *		0 on success.
 */
int kvm_mips_set_count_ctl(struct kvm_vcpu *vcpu, s64 count_ctl)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	s64 changed = count_ctl ^ vcpu->arch.count_ctl;
	s64 delta;
	ktime_t expire, now;
	uint32_t count, compare;

	/* Only allow defined bits to be changed */
	if (changed & ~(s64)(KVM_REG_MIPS_COUNT_CTL_DC))
		return -EINVAL;

	/* Apply new value */
	vcpu->arch.count_ctl = count_ctl;

	/* Master CP0_Count disable */
	if (changed & KVM_REG_MIPS_COUNT_CTL_DC) {
		/* Is CP0_Cause.DC already disabling CP0_Count? */
		if (kvm_read_c0_guest_cause(cop0) & CAUSEF_DC) {
			if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC)
				/* Just record the current time */
				vcpu->arch.count_resume = ktime_get();
		} else if (count_ctl & KVM_REG_MIPS_COUNT_CTL_DC) {
			/* disable timer and record current time */
			vcpu->arch.count_resume = kvm_mips_count_disable(vcpu);
		} else {
			/*
			 * Calculate timeout relative to static count at resume
			 * time (wrap 0 to 2^32).
			 */
			count = kvm_read_c0_guest_count(cop0);
			compare = kvm_read_c0_guest_compare(cop0);
			delta = (u64)(uint32_t)(compare - count - 1) + 1;
			delta = div_u64(delta * NSEC_PER_SEC,
					vcpu->arch.count_hz);
			expire = ktime_add_ns(vcpu->arch.count_resume, delta);

			/* Handle pending interrupt */
			now = ktime_get();
			if (ktime_compare(now, expire) >= 0)
				/* Nothing should be waiting on the timeout */
				kvm_mips_callbacks->queue_timer_int(vcpu);

			/* Resume hrtimer without changing bias */
			count = kvm_mips_read_count_running(vcpu, now);
			kvm_mips_resume_hrtimer(vcpu, now, count);
		}
	}

	return 0;
}

/**
 * kvm_mips_set_count_resume() - Update the count resume KVM register.
 * @vcpu:		Virtual CPU.
 * @count_resume:	Count resume register new value.
 *
 * Set the count resume KVM register.
 *
 * Returns:	-EINVAL if out of valid range (0..now).
 *		0 on success.
 */
int kvm_mips_set_count_resume(struct kvm_vcpu *vcpu, s64 count_resume)
{
	/*
	 * It doesn't make sense for the resume time to be in the future, as it
	 * would be possible for the next interrupt to be more than a full
	 * period in the future.
	 */
	if (count_resume < 0 || count_resume > ktime_to_ns(ktime_get()))
		return -EINVAL;

	vcpu->arch.count_resume = ns_to_ktime(count_resume);
	return 0;
}

/**
 * kvm_mips_count_timeout() - Push timer forward on timeout.
 * @vcpu:	Virtual CPU.
 *
 * Handle an hrtimer event by pushing the hrtimer forward one period.
 *
 * Returns:	The hrtimer_restart value to return to the hrtimer subsystem.
 */
enum hrtimer_restart kvm_mips_count_timeout(struct kvm_vcpu *vcpu)
{
	/* Add the Count period to the current expiry time */
	hrtimer_add_expires_ns(&vcpu->arch.comparecount_timer,
			       vcpu->arch.count_period);
	return HRTIMER_RESTART;
}

enum emulation_result kvm_mips_emul_eret(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;

	if (kvm_read_c0_guest_status(cop0) & ST0_EXL) {
		kvm_debug("[%#lx] ERET to %#lx\n", vcpu->arch.pc,
			  kvm_read_c0_guest_epc(cop0));
		kvm_clear_c0_guest_status(cop0, ST0_EXL);
		vcpu->arch.pc = kvm_read_c0_guest_epc(cop0);

	} else if (kvm_read_c0_guest_status(cop0) & ST0_ERL) {
		kvm_clear_c0_guest_status(cop0, ST0_ERL);
		vcpu->arch.pc = kvm_read_c0_guest_errorepc(cop0);
	} else {
		kvm_err("[%#lx] ERET when MIPS_SR_EXL|MIPS_SR_ERL == 0\n",
			vcpu->arch.pc);
		er = EMULATE_FAIL;
	}

	return er;
}

enum emulation_result kvm_mips_emul_wait(struct kvm_vcpu *vcpu)
{
	kvm_debug("[%#lx] !!!WAIT!!! (%#lx)\n", vcpu->arch.pc,
		  vcpu->arch.pending_exceptions);

	++vcpu->stat.wait_exits;
	trace_kvm_exit(vcpu, WAIT_EXITS);
	if (!vcpu->arch.pending_exceptions) {
		vcpu->arch.wait = 1;
		kvm_vcpu_block(vcpu);

		/*
		 * If we are runnable, then definitely go off to user space to
		 * check if any I/O interrupts are pending.
		 */
		if (kvm_check_request(KVM_REQ_UNHALT, vcpu)) {
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
		}
	}

	return EMULATE_DONE;
}

/*
 * XXXKYMA: Linux doesn't seem to use TLBR, return EMULATE_FAIL for now so that
 * we can catch this, if things ever change
 */
enum emulation_result kvm_mips_emul_tlbr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	uint32_t pc = vcpu->arch.pc;

	kvm_err("[%#x] COP0_TLBR [%ld]\n", pc, kvm_read_c0_guest_index(cop0));
	return EMULATE_FAIL;
}

/* Write Guest TLB Entry @ Index */
enum emulation_result kvm_mips_emul_tlbwi(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int index = kvm_read_c0_guest_index(cop0);
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;

	if (index < 0 || index >= KVM_MIPS_GUEST_TLB_SIZE) {
		kvm_debug("%s: illegal index: %d\n", __func__, index);
		kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
			  pc, index, kvm_read_c0_guest_entryhi(cop0),
			  kvm_read_c0_guest_entrylo0(cop0),
			  kvm_read_c0_guest_entrylo1(cop0),
			  kvm_read_c0_guest_pagemask(cop0));
		index = (index & ~0x80000000) % KVM_MIPS_GUEST_TLB_SIZE;
	}

	tlb = &vcpu->arch.guest_tlb[index];
	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWI [%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx, mask: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0),
		  kvm_read_c0_guest_pagemask(cop0));

	return EMULATE_DONE;
}

/* Write Guest TLB Entry @ Random Index */
enum emulation_result kvm_mips_emul_tlbwr(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_mips_tlb *tlb = NULL;
	uint32_t pc = vcpu->arch.pc;
	int index;

	get_random_bytes(&index, sizeof(index));
	index &= (KVM_MIPS_GUEST_TLB_SIZE - 1);

	tlb = &vcpu->arch.guest_tlb[index];

	/*
	 * Probe the shadow host TLB for the entry being overwritten, if one
	 * matches, invalidate it
	 */
	kvm_mips_host_tlb_inv(vcpu, tlb->tlb_hi);

	tlb->tlb_mask = kvm_read_c0_guest_pagemask(cop0);
	tlb->tlb_hi = kvm_read_c0_guest_entryhi(cop0);
	tlb->tlb_lo0 = kvm_read_c0_guest_entrylo0(cop0);
	tlb->tlb_lo1 = kvm_read_c0_guest_entrylo1(cop0);

	kvm_debug("[%#x] COP0_TLBWR[%d] (entryhi: %#lx, entrylo0: %#lx entrylo1: %#lx)\n",
		  pc, index, kvm_read_c0_guest_entryhi(cop0),
		  kvm_read_c0_guest_entrylo0(cop0),
		  kvm_read_c0_guest_entrylo1(cop0));

	return EMULATE_DONE;
}

enum emulation_result kvm_mips_emul_tlbp(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	long entryhi = kvm_read_c0_guest_entryhi(cop0);
	uint32_t pc = vcpu->arch.pc;
	int index = -1;

	index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

	kvm_write_c0_guest_index(cop0, index);

	kvm_debug("[%#x] COP0_TLBP (entryhi: %#lx), index: %d\n", pc, entryhi,
		  index);

	return EMULATE_DONE;
}

/**
 * kvm_mips_config1_wrmask() - Find mask of writable bits in guest Config1
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config1 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config1_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

/**
 * kvm_mips_config3_wrmask() - Find mask of writable bits in guest Config3
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config3 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config3_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config4 is optional */
	return MIPS_CONF_M;
}

/**
 * kvm_mips_config4_wrmask() - Find mask of writable bits in guest Config4
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config4 CP0
 * register, by userland (currently read-only to the guest).
 */
unsigned int kvm_mips_config4_wrmask(struct kvm_vcpu *vcpu)
{
	/* Config5 is optional */
	return MIPS_CONF_M;
}

/**
 * kvm_mips_config5_wrmask() - Find mask of writable bits in guest Config5
 * @vcpu:	Virtual CPU.
 *
 * Finds the mask of bits which are writable in the guest's Config5 CP0
 * register, by the guest itself.
 */
unsigned int kvm_mips_config5_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = 0;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE;
		/* We don't support UFR or UFE */
	}

	return mask;
}

enum emulation_result kvm_mips_emulate_CP0(uint32_t inst, uint32_t *opc,
					   uint32_t cause, struct kvm_run *run,
					   struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	int32_t rt, rd, copz, sel, co_bit, op;
	uint32_t pc = vcpu->arch.pc;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	copz = (inst >> 21) & 0x1f;
	rt = (inst >> 16) & 0x1f;
	rd = (inst >> 11) & 0x1f;
	sel = inst & 0x7;
	co_bit = (inst >> 25) & 1;

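	/*
	 * For reference, a sketch of the MIPS32 COP0 encodings decoded above
	 * (per the architecture manuals; illustrative, not from the original
	 * source):
	 *
	 *   MFC0/MTC0:  31-26 COP0 | 25-21 copz | 20-16 rt | 15-11 rd |
	 *               10-3 0 | 2-0 sel
	 *   CO=1 forms: 31-26 COP0 | 25 CO | 24-6 0 | 5-0 funct
	 *               (tlbr/tlbwi/tlbwr/tlbp/eret/wait)
	 *
	 * e.g. `mtc0 $t0, $9` (write CP0_Count) decodes to copz == mtc_op,
	 * rd == MIPS_CP0_COUNT and sel == 0.
	 */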
	if (co_bit) {
		op = (inst) & 0xff;

		switch (op) {
		case tlbr_op:	/*  Read indexed TLB entry  */
			er = kvm_mips_emul_tlbr(vcpu);
			break;
		case tlbwi_op:	/*  Write indexed  */
			er = kvm_mips_emul_tlbwi(vcpu);
			break;
		case tlbwr_op:	/*  Write random  */
			er = kvm_mips_emul_tlbwr(vcpu);
			break;
		case tlbp_op:	/* TLB Probe */
			er = kvm_mips_emul_tlbp(vcpu);
			break;
		case rfe_op:
			kvm_err("!!!COP0_RFE!!!\n");
			break;
		case eret_op:
			er = kvm_mips_emul_eret(vcpu);
			goto dont_update_pc;
			break;
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		}
	} else {
		switch (copz) {
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			/* Get reg */
			if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				vcpu->arch.gprs[rt] = kvm_mips_read_count(vcpu);
			} else if ((rd == MIPS_CP0_ERRCTL) && (sel == 0)) {
				vcpu->arch.gprs[rt] = 0x0;
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			} else {
				vcpu->arch.gprs[rt] = cop0->reg[rd][sel];

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mfc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MFCz[%d][%d], vcpu->arch.gprs[%d]: %#lx\n",
				  pc, rd, sel, rt, vcpu->arch.gprs[rt]);

			break;

		case dmfc_op:
			vcpu->arch.gprs[rt] = cop0->reg[rd][sel];
			break;

		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if ((rd == MIPS_CP0_TLB_INDEX)
			    && (vcpu->arch.gprs[rt] >=
				KVM_MIPS_GUEST_TLB_SIZE)) {
				kvm_err("Invalid TLB Index: %ld",
					vcpu->arch.gprs[rt]);
				er = EMULATE_FAIL;
				break;
			}
#define C0_EBASE_CORE_MASK 0xff
			if ((rd == MIPS_CP0_PRID) && (sel == 1)) {
				/* Preserve CORE number */
				kvm_change_c0_guest_ebase(cop0,
							  ~(C0_EBASE_CORE_MASK),
							  vcpu->arch.gprs[rt]);
				kvm_err("MTCz, cop0->reg[EBASE]: %#lx\n",
					kvm_read_c0_guest_ebase(cop0));
			} else if (rd == MIPS_CP0_TLB_HI && sel == 0) {
				uint32_t nasid =
					vcpu->arch.gprs[rt] & ASID_MASK;
				if ((KSEGX(vcpu->arch.gprs[rt]) != CKSEG0) &&
				    ((kvm_read_c0_guest_entryhi(cop0) &
				      ASID_MASK) != nasid)) {
					kvm_debug("MTCz, change ASID from %#lx to %#lx\n",
						  kvm_read_c0_guest_entryhi(cop0)
						  & ASID_MASK,
						  vcpu->arch.gprs[rt]
						  & ASID_MASK);

					/* Blow away the shadow host TLBs */
					kvm_mips_flush_host_tlb(1);
				}
				kvm_write_c0_guest_entryhi(cop0,
							   vcpu->arch.gprs[rt]);
			}
			/* Are we writing to COUNT */
			else if ((rd == MIPS_CP0_COUNT) && (sel == 0)) {
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
				goto done;
			} else if ((rd == MIPS_CP0_COMPARE) && (sel == 0)) {
				kvm_debug("[%#x] MTCz, COMPARE %#lx <- %#lx\n",
					  pc, kvm_read_c0_guest_compare(cop0),
					  vcpu->arch.gprs[rt]);

				/* If we are writing to COMPARE */
				/* Clear pending timer interrupt, if any */
				kvm_mips_callbacks->dequeue_timer_int(vcpu);
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt]);
			} else if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
				unsigned int old_val, val, change;

				old_val = kvm_read_c0_guest_status(cop0);
				val = vcpu->arch.gprs[rt];
				change = val ^ old_val;

				/* Make sure that the NMI bit is never set */
				val &= ~ST0_NMI;

				/*
				 * Don't allow CU1 or FR to be set unless FPU
				 * capability enabled and exists in guest
				 * configuration.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					val &= ~(ST0_CU1 | ST0_FR);

				/*
				 * Also don't allow FR to be set if host doesn't
				 * support it.
				 */
				if (!(current_cpu_data.fpu_id & MIPS_FPIR_F64))
					val &= ~ST0_FR;

				/* Handle changes in FPU mode */
				preempt_disable();

				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				if (change & ST0_FR)
					kvm_drop_fpu(vcpu);

				/*
				 * Propagate CU1 (FPU enable) changes
				 * immediately if the FPU context is already
				 * loaded. When disabling we leave the context
				 * loaded so it can be quickly enabled again in
				 * the near future.
				 */
				if (change & ST0_CU1 &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
					change_c0_status(ST0_CU1, val);

				preempt_enable();

				kvm_write_c0_guest_status(cop0, val);

#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				/*
				 * If FPU present, we need CU1/FR bits to take
				 * effect fairly soon.
				 */
				if (!kvm_mips_guest_has_fpu(&vcpu->arch))
					kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
				unsigned int old_val, val, change, wrmask;

				old_val = kvm_read_c0_guest_config5(cop0);
				val = vcpu->arch.gprs[rt];

				/* Only a few bits are writable in Config5 */
				wrmask = kvm_mips_config5_wrmask(vcpu);
				change = (val ^ old_val) & wrmask;
				val = old_val ^ change;

				/* Handle changes in FPU modes */
				preempt_disable();

				/*
				 * Propagate FRE changes immediately if the FPU
				 * context is already loaded.
				 */
				if (change & MIPS_CONF5_FRE &&
				    vcpu->arch.fpu_inuse & KVM_MIPS_FPU_FPU)
					change_c0_config5(MIPS_CONF5_FRE, val);

				preempt_enable();

				kvm_write_c0_guest_config5(cop0, val);
			} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
				uint32_t old_cause, new_cause;

				old_cause = kvm_read_c0_guest_cause(cop0);
				new_cause = vcpu->arch.gprs[rt];
				/* Update R/W bits */
				kvm_change_c0_guest_cause(cop0, 0x08800300,
							  new_cause);
				/* DC bit enabling/disabling timer? */
				if ((old_cause ^ new_cause) & CAUSEF_DC) {
					if (new_cause & CAUSEF_DC)
						kvm_mips_count_disable_cause(vcpu);
					else
						kvm_mips_count_enable_cause(vcpu);
				}
			} else {
				cop0->reg[rd][sel] = vcpu->arch.gprs[rt];
#ifdef CONFIG_KVM_MIPS_DYN_TRANS
				kvm_mips_trans_mtc0(inst, opc, vcpu);
#endif
			}

			kvm_debug("[%#x] MTCz, cop0->reg[%d][%d]: %#lx\n", pc,
				  rd, sel, cop0->reg[rd][sel]);
			break;

		case dmtc_op:
			kvm_err("!!!!!!![%#lx]dmtc_op: rt: %d, rd: %d, sel: %d!!!!!!\n",
				vcpu->arch.pc, rt, rd, sel);
			er = EMULATE_FAIL;
			break;

		case mfmcz_op:
#ifdef KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[MIPS_CP0_STATUS][0]++;
#endif
			if (rt != 0) {
				vcpu->arch.gprs[rt] =
					kvm_read_c0_guest_status(cop0);
			}
			/* EI */
			if (inst & 0x20) {
				kvm_debug("[%#lx] mfmcz_op: EI\n",
					  vcpu->arch.pc);
				kvm_set_c0_guest_status(cop0, ST0_IE);
			} else {
				kvm_debug("[%#lx] mfmcz_op: DI\n",
					  vcpu->arch.pc);
				kvm_clear_c0_guest_status(cop0, ST0_IE);
			}

			break;

		case wrpgpr_op:
			{
				uint32_t css =
					cop0->reg[MIPS_CP0_STATUS][2] & 0xf;
				uint32_t pss =
					(cop0->reg[MIPS_CP0_STATUS][2] >> 6) & 0xf;
				/*
				 * We don't support any shadow register sets, so
				 * SRSCtl[PSS] == SRSCtl[CSS] = 0
				 */
				if (css || pss) {
					er = EMULATE_FAIL;
					break;
				}
				kvm_debug("WRPGPR[%d][%d] = %#lx\n", pss, rd,
					  vcpu->arch.gprs[rt]);
				vcpu->arch.gprs[rd] = vcpu->arch.gprs[rt];
			}
			break;
		default:
			kvm_err("[%#lx]MachEmulateCP0: unsupported COP0, copz: 0x%x\n",
				vcpu->arch.pc, copz);
			er = EMULATE_FAIL;
			break;
		}
	}

done:
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

dont_update_pc:
	/*
	 * This is for special instructions whose emulation
	 * updates the PC, so do not overwrite the PC under
	 * any circumstances
	 */

	return er;
}

enum emulation_result kvm_mips_emulate_store(uint32_t inst, uint32_t cause,
					     struct kvm_run *run,
					     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DO_MMIO;
	int32_t op, base, rt, offset;
	uint32_t bytes;
	void *data = run->mmio.data;
	unsigned long curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	rt = (inst >> 16) & 0x1f;
	base = (inst >> 21) & 0x1f;
	offset = inst & 0xffff;
	op = (inst >> 26) & 0x3f;

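	/*
	 * For reference (illustrative sketch of the MIPS32 I-type encoding,
	 * not from the original source), loads and stores are laid out as
	 *
	 *   31-26 opcode | 25-21 base | 20-16 rt | 15-0 signed offset
	 *
	 * Note that the faulting guest address is taken from
	 * host_cp0_badvaddr below rather than recomputed from
	 * gprs[base] + offset here.
	 */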
1296 switch (op) {
1297 case sb_op:
1298 bytes = 1;
1299 if (bytes > sizeof(run->mmio.data)) {
1300 kvm_err("%s: bad MMIO length: %d\n", __func__,
1301 run->mmio.len);
1302 }
1303 run->mmio.phys_addr =
1304 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1305 host_cp0_badvaddr);
1306 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1307 er = EMULATE_FAIL;
1308 break;
1309 }
1310 run->mmio.len = bytes;
1311 run->mmio.is_write = 1;
1312 vcpu->mmio_needed = 1;
1313 vcpu->mmio_is_write = 1;
1314 *(u8 *) data = vcpu->arch.gprs[rt];
1315 kvm_debug("OP_SB: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1316 vcpu->arch.host_cp0_badvaddr, vcpu->arch.gprs[rt],
1317 *(uint8_t *) data);
1318
1319 break;
1320
1321 case sw_op:
1322 bytes = 4;
1323 if (bytes > sizeof(run->mmio.data)) {
1324 kvm_err("%s: bad MMIO length: %d\n", __func__,
1325 run->mmio.len);
1326 }
1327 run->mmio.phys_addr =
1328 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1329 host_cp0_badvaddr);
1330 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1331 er = EMULATE_FAIL;
1332 break;
1333 }
1334
1335 run->mmio.len = bytes;
1336 run->mmio.is_write = 1;
1337 vcpu->mmio_needed = 1;
1338 vcpu->mmio_is_write = 1;
1339 *(uint32_t *) data = vcpu->arch.gprs[rt];
1340
1341 kvm_debug("[%#lx] OP_SW: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1342 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1343 vcpu->arch.gprs[rt], *(uint32_t *) data);
1344 break;
1345
1346 case sh_op:
1347 bytes = 2;
1348 if (bytes > sizeof(run->mmio.data)) {
1349 kvm_err("%s: bad MMIO length: %d\n", __func__,
1350 run->mmio.len);
1351 }
1352 run->mmio.phys_addr =
1353 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1354 host_cp0_badvaddr);
1355 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1356 er = EMULATE_FAIL;
1357 break;
1358 }
1359
1360 run->mmio.len = bytes;
1361 run->mmio.is_write = 1;
1362 vcpu->mmio_needed = 1;
1363 vcpu->mmio_is_write = 1;
1364 *(uint16_t *) data = vcpu->arch.gprs[rt];
1365
1366 kvm_debug("[%#lx] OP_SH: eaddr: %#lx, gpr: %#lx, data: %#x\n",
1367 vcpu->arch.pc, vcpu->arch.host_cp0_badvaddr,
1368 vcpu->arch.gprs[rt], *(uint32_t *) data);
1369 break;
1370
1371 default:
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07001372 kvm_err("Store not yet supported");
Sanjay Lale685c682012-11-21 18:34:04 -08001373 er = EMULATE_FAIL;
1374 break;
1375 }
1376
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001377 /* Rollback PC if emulation was unsuccessful */
1378 if (er == EMULATE_FAIL)
Sanjay Lale685c682012-11-21 18:34:04 -08001379 vcpu->arch.pc = curr_pc;
Sanjay Lale685c682012-11-21 18:34:04 -08001380
1381 return er;
1382}
1383
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001384enum emulation_result kvm_mips_emulate_load(uint32_t inst, uint32_t cause,
1385 struct kvm_run *run,
1386 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001387{
1388 enum emulation_result er = EMULATE_DO_MMIO;
1389 int32_t op, base, rt, offset;
1390 uint32_t bytes;
1391
1392 rt = (inst >> 16) & 0x1f;
1393 base = (inst >> 21) & 0x1f;
1394 offset = inst & 0xffff;
1395 op = (inst >> 26) & 0x3f;
1396
1397 vcpu->arch.pending_load_cause = cause;
1398 vcpu->arch.io_gpr = rt;
1399
1400 switch (op) {
1401 case lw_op:
1402 bytes = 4;
1403 if (bytes > sizeof(run->mmio.data)) {
1404 kvm_err("%s: bad MMIO length: %d\n", __func__,
1405 run->mmio.len);
1406 er = EMULATE_FAIL;
1407 break;
1408 }
1409 run->mmio.phys_addr =
1410 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1411 host_cp0_badvaddr);
1412 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1413 er = EMULATE_FAIL;
1414 break;
1415 }
1416
1417 run->mmio.len = bytes;
1418 run->mmio.is_write = 0;
1419 vcpu->mmio_needed = 1;
1420 vcpu->mmio_is_write = 0;
1421 break;
1422
1423 case lh_op:
1424 case lhu_op:
1425 bytes = 2;
1426 if (bytes > sizeof(run->mmio.data)) {
1427 kvm_err("%s: bad MMIO length: %d\n", __func__,
1428 run->mmio.len);
1429 er = EMULATE_FAIL;
1430 break;
1431 }
1432 run->mmio.phys_addr =
1433 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1434 host_cp0_badvaddr);
1435 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1436 er = EMULATE_FAIL;
1437 break;
1438 }
1439
1440 run->mmio.len = bytes;
1441 run->mmio.is_write = 0;
1442 vcpu->mmio_needed = 1;
1443 vcpu->mmio_is_write = 0;
1444
1445 if (op == lh_op)
1446 vcpu->mmio_needed = 2;
1447 else
1448 vcpu->mmio_needed = 1;
1449
1450 break;
1451
1452 case lbu_op:
1453 case lb_op:
1454 bytes = 1;
1455 if (bytes > sizeof(run->mmio.data)) {
1456 kvm_err("%s: bad MMIO length: %d\n", __func__,
1457 run->mmio.len);
1458 er = EMULATE_FAIL;
1459 break;
1460 }
1461 run->mmio.phys_addr =
1462 kvm_mips_callbacks->gva_to_gpa(vcpu->arch.
1463 host_cp0_badvaddr);
1464 if (run->mmio.phys_addr == KVM_INVALID_ADDR) {
1465 er = EMULATE_FAIL;
1466 break;
1467 }
1468
1469 run->mmio.len = bytes;
1470 run->mmio.is_write = 0;
1471 vcpu->mmio_is_write = 0;
1472
1473 if (op == lb_op)
1474 vcpu->mmio_needed = 2;
1475 else
1476 vcpu->mmio_needed = 1;
1477
1478 break;
1479
1480 default:
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07001481 kvm_err("Load not yet supported");
Sanjay Lale685c682012-11-21 18:34:04 -08001482 er = EMULATE_FAIL;
1483 break;
1484 }
1485
1486 return er;
1487}
1488
1489int kvm_mips_sync_icache(unsigned long va, struct kvm_vcpu *vcpu)
1490{
1491 unsigned long offset = (va & ~PAGE_MASK);
1492 struct kvm *kvm = vcpu->kvm;
1493 unsigned long pa;
1494 gfn_t gfn;
1495 pfn_t pfn;
1496
1497 gfn = va >> PAGE_SHIFT;
1498
1499 if (gfn >= kvm->arch.guest_pmap_npages) {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07001500 kvm_err("%s: Invalid gfn: %#llx\n", __func__, gfn);
Sanjay Lale685c682012-11-21 18:34:04 -08001501 kvm_mips_dump_host_tlbs();
1502 kvm_arch_vcpu_dump_regs(vcpu);
1503 return -1;
1504 }
1505 pfn = kvm->arch.guest_pmap[gfn];
1506 pa = (pfn << PAGE_SHIFT) | offset;
1507
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07001508 kvm_debug("%s: va: %#lx, unmapped: %#x\n", __func__, va,
1509 CKSEG0ADDR(pa));
Sanjay Lale685c682012-11-21 18:34:04 -08001510
James Hoganfacaaec2014-05-29 10:16:25 +01001511 local_flush_icache_range(CKSEG0ADDR(pa), 32);
Sanjay Lale685c682012-11-21 18:34:04 -08001512 return 0;
1513}
1514
1515#define MIPS_CACHE_OP_INDEX_INV 0x0
1516#define MIPS_CACHE_OP_INDEX_LD_TAG 0x1
1517#define MIPS_CACHE_OP_INDEX_ST_TAG 0x2
1518#define MIPS_CACHE_OP_IMP 0x3
1519#define MIPS_CACHE_OP_HIT_INV 0x4
1520#define MIPS_CACHE_OP_FILL_WB_INV 0x5
1521#define MIPS_CACHE_OP_HIT_HB 0x6
1522#define MIPS_CACHE_OP_FETCH_LOCK 0x7
1523
1524#define MIPS_CACHE_ICACHE 0x0
1525#define MIPS_CACHE_DCACHE 0x1
1526#define MIPS_CACHE_SEC 0x3
1527
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001528enum emulation_result kvm_mips_emulate_cache(uint32_t inst, uint32_t *opc,
1529 uint32_t cause,
1530 struct kvm_run *run,
1531 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001532{
1533 struct mips_coproc *cop0 = vcpu->arch.cop0;
Sanjay Lale685c682012-11-21 18:34:04 -08001534 enum emulation_result er = EMULATE_DONE;
1535 int32_t offset, cache, op_inst, op, base;
1536 struct kvm_vcpu_arch *arch = &vcpu->arch;
1537 unsigned long va;
1538 unsigned long curr_pc;
1539
1540 /*
1541 * Update PC and hold onto current PC in case there is
1542 * an error and we want to rollback the PC
1543 */
1544 curr_pc = vcpu->arch.pc;
1545 er = update_pc(vcpu, cause);
1546 if (er == EMULATE_FAIL)
1547 return er;
1548
1549 base = (inst >> 21) & 0x1f;
1550 op_inst = (inst >> 16) & 0x1f;
1551 offset = inst & 0xffff;
1552 cache = (inst >> 16) & 0x3;
1553 op = (inst >> 18) & 0x7;
1554
1555 va = arch->gprs[base] + offset;
1556
1557 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1558 cache, op, base, arch->gprs[base], offset);
1559
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001560 /*
1561 * Treat INDEX_INV as a nop, basically issued by Linux on startup to
1562 * invalidate the caches entirely by stepping through all the
1563 * ways/indexes
Sanjay Lale685c682012-11-21 18:34:04 -08001564 */
1565 if (op == MIPS_CACHE_OP_INDEX_INV) {
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001566 kvm_debug("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1567 vcpu->arch.pc, vcpu->arch.gprs[31], cache, op, base,
1568 arch->gprs[base], offset);
Sanjay Lale685c682012-11-21 18:34:04 -08001569
1570 if (cache == MIPS_CACHE_DCACHE)
1571 r4k_blast_dcache();
1572 else if (cache == MIPS_CACHE_ICACHE)
1573 r4k_blast_icache();
1574 else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07001575 kvm_err("%s: unsupported CACHE INDEX operation\n",
1576 __func__);
Sanjay Lale685c682012-11-21 18:34:04 -08001577 return EMULATE_FAIL;
1578 }
1579
1580#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1581 kvm_mips_trans_cache_index(inst, opc, vcpu);
1582#endif
1583 goto done;
1584 }
1585
1586 preempt_disable();
1587 if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001588 if (kvm_mips_host_tlb_lookup(vcpu, va) < 0)
Sanjay Lale685c682012-11-21 18:34:04 -08001589 kvm_mips_handle_kseg0_tlb_fault(va, vcpu);
Sanjay Lale685c682012-11-21 18:34:04 -08001590 } else if ((KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0) ||
1591 KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
1592 int index;
1593
1594 /* If an entry already exists then skip */
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001595 if (kvm_mips_host_tlb_lookup(vcpu, va) >= 0)
Sanjay Lale685c682012-11-21 18:34:04 -08001596 goto skip_fault;
Sanjay Lale685c682012-11-21 18:34:04 -08001597
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001598 /*
1599 * If address not in the guest TLB, then give the guest a fault,
1600 * the resulting handler will do the right thing
Sanjay Lale685c682012-11-21 18:34:04 -08001601 */
1602 index = kvm_mips_guest_tlb_lookup(vcpu, (va & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001603 (kvm_read_c0_guest_entryhi
1604 (cop0) & ASID_MASK));
Sanjay Lale685c682012-11-21 18:34:04 -08001605
1606 if (index < 0) {
1607 vcpu->arch.host_cp0_entryhi = (va & VPN2_MASK);
1608 vcpu->arch.host_cp0_badvaddr = va;
1609 er = kvm_mips_emulate_tlbmiss_ld(cause, NULL, run,
1610 vcpu);
1611 preempt_enable();
1612 goto dont_update_pc;
1613 } else {
1614 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001615 /*
1616 * Check if the entry is valid, if not then setup a TLB
1617 * invalid exception to the guest
1618 */
Sanjay Lale685c682012-11-21 18:34:04 -08001619 if (!TLB_IS_VALID(*tlb, va)) {
1620 er = kvm_mips_emulate_tlbinv_ld(cause, NULL,
1621 run, vcpu);
1622 preempt_enable();
1623 goto dont_update_pc;
1624 } else {
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001625 /*
1626 * We fault an entry from the guest tlb to the
1627 * shadow host TLB
1628 */
Sanjay Lale685c682012-11-21 18:34:04 -08001629 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb,
1630 NULL,
1631 NULL);
1632 }
1633 }
1634 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07001635 kvm_err("INVALID CACHE INDEX/ADDRESS (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1636 cache, op, base, arch->gprs[base], offset);
Sanjay Lale685c682012-11-21 18:34:04 -08001637 er = EMULATE_FAIL;
1638 preempt_enable();
1639 goto dont_update_pc;
1640
1641 }
1642
1643skip_fault:
1644 /* XXXKYMA: Only a subset of cache ops are supported, used by Linux */
1645 if (cache == MIPS_CACHE_DCACHE
1646 && (op == MIPS_CACHE_OP_FILL_WB_INV
1647 || op == MIPS_CACHE_OP_HIT_INV)) {
1648 flush_dcache_line(va);
1649
1650#ifdef CONFIG_KVM_MIPS_DYN_TRANS
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001651 /*
1652 * Replace the CACHE instruction, with a SYNCI, not the same,
1653 * but avoids a trap
1654 */
Sanjay Lale685c682012-11-21 18:34:04 -08001655 kvm_mips_trans_cache_va(inst, opc, vcpu);
1656#endif
1657 } else if (op == MIPS_CACHE_OP_HIT_INV && cache == MIPS_CACHE_ICACHE) {
1658 flush_dcache_line(va);
1659 flush_icache_line(va);
1660
1661#ifdef CONFIG_KVM_MIPS_DYN_TRANS
1662 /* Replace the CACHE instruction, with a SYNCI */
1663 kvm_mips_trans_cache_va(inst, opc, vcpu);
1664#endif
1665 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07001666 kvm_err("NO-OP CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
1667 cache, op, base, arch->gprs[base], offset);
Sanjay Lale685c682012-11-21 18:34:04 -08001668 er = EMULATE_FAIL;
1669 preempt_enable();
1670 goto dont_update_pc;
1671 }
1672
1673 preempt_enable();
1674
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001675dont_update_pc:
1676 /* Rollback PC */
Sanjay Lale685c682012-11-21 18:34:04 -08001677 vcpu->arch.pc = curr_pc;
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001678done:
Sanjay Lale685c682012-11-21 18:34:04 -08001679 return er;
1680}
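
/*
 * Illustrative sketch (hypothetical helper, not part of the original
 * flow): how the cache/op values used by the handler above map onto
 * the architectural MIPS32 CACHE encoding, where rt-field bits 1:0
 * pick the cache and bits 4:2 the operation.
 */
#if 0
static void cache_fields_sketch(uint32_t inst)
{
	uint32_t field = (inst >> 16) & 0x1f;	/* rt field of CACHE */
	uint32_t which = field & 0x3;		/* 0 = I, 1 = D, 3 = S */
	uint32_t cop   = (field >> 2) & 0x7;	/* 4 = Hit Inv, 5 = Hit WB Inv */
}
#endif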
1681
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001682enum emulation_result kvm_mips_emulate_inst(unsigned long cause, uint32_t *opc,
1683 struct kvm_run *run,
1684 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001685{
1686 enum emulation_result er = EMULATE_DONE;
1687 uint32_t inst;
1688
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001689 /* Fetch the instruction. */
1690 if (cause & CAUSEF_BD)
Sanjay Lale685c682012-11-21 18:34:04 -08001691 opc += 1;
Sanjay Lale685c682012-11-21 18:34:04 -08001692
1693 inst = kvm_get_inst(opc, vcpu);
1694
1695 switch (((union mips_instruction)inst).r_format.opcode) {
1696 case cop0_op:
1697 er = kvm_mips_emulate_CP0(inst, opc, cause, run, vcpu);
1698 break;
1699 case sb_op:
1700 case sh_op:
1701 case sw_op:
1702 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1703 break;
1704 case lb_op:
1705 case lbu_op:
1706 case lhu_op:
1707 case lh_op:
1708 case lw_op:
1709 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1710 break;
1711
1712 case cache_op:
1713 ++vcpu->stat.cache_exits;
1714 trace_kvm_exit(vcpu, CACHE_EXITS);
1715 er = kvm_mips_emulate_cache(inst, opc, cause, run, vcpu);
1716 break;
1717
1718 default:
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07001719 kvm_err("Instruction emulation not supported (%p/%#x)\n", opc,
1720 inst);
Sanjay Lale685c682012-11-21 18:34:04 -08001721 kvm_arch_vcpu_dump_regs(vcpu);
1722 er = EMULATE_FAIL;
1723 break;
1724 }
1725
1726 return er;
1727}
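
/*
 * Worked example (illustrative, not from the original source): the
 * dispatch above keys off the major opcode in bits 31:26 of the
 * instruction word.  "sw $2, 8($29)" assembles to 0xafa20008, and
 * (0xafa20008 >> 26) & 0x3f == 0x2b == sw_op, so such an instruction
 * is routed to kvm_mips_emulate_store().
 */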
1728
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001729enum emulation_result kvm_mips_emulate_syscall(unsigned long cause,
1730 uint32_t *opc,
1731 struct kvm_run *run,
1732 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001733{
1734 struct mips_coproc *cop0 = vcpu->arch.cop0;
1735 struct kvm_vcpu_arch *arch = &vcpu->arch;
1736 enum emulation_result er = EMULATE_DONE;
1737
1738 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1739 /* save old pc */
1740 kvm_write_c0_guest_epc(cop0, arch->pc);
1741 kvm_set_c0_guest_status(cop0, ST0_EXL);
1742
1743 if (cause & CAUSEF_BD)
1744 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1745 else
1746 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1747
1748 kvm_debug("Delivering SYSCALL @ pc %#lx\n", arch->pc);
1749
1750 kvm_change_c0_guest_cause(cop0, (0xff),
1751 (T_SYSCALL << CAUSEB_EXCCODE));
1752
1753 /* Set PC to the exception entry point */
1754 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1755
1756 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07001757 kvm_err("Trying to deliver SYSCALL when EXL is already set\n");
Sanjay Lale685c682012-11-21 18:34:04 -08001758 er = EMULATE_FAIL;
1759 }
1760
1761 return er;
1762}
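
/*
 * Worked example (illustrative): assuming the usual MIPS Cause layout
 * (ExcCode in bits 6:2, i.e. CAUSEB_EXCCODE == 2) and T_SYSCALL == 8,
 * the kvm_change_c0_guest_cause() call above clears Cause[7:0] and
 * writes 8 << 2 == 0x20, leaving guest Cause.ExcCode == Sys.
 */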
1763
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001764enum emulation_result kvm_mips_emulate_tlbmiss_ld(unsigned long cause,
1765 uint32_t *opc,
1766 struct kvm_run *run,
1767 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001768{
1769 struct mips_coproc *cop0 = vcpu->arch.cop0;
1770 struct kvm_vcpu_arch *arch = &vcpu->arch;
Sanjay Lale685c682012-11-21 18:34:04 -08001771	unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001772 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001773
1774 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1775 /* save old pc */
1776 kvm_write_c0_guest_epc(cop0, arch->pc);
1777 kvm_set_c0_guest_status(cop0, ST0_EXL);
1778
1779 if (cause & CAUSEF_BD)
1780 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1781 else
1782 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1783
1784 kvm_debug("[EXL == 0] delivering TLB MISS @ pc %#lx\n",
1785 arch->pc);
1786
1787 /* set pc to the exception entry point */
1788 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1789
1790 } else {
1791 kvm_debug("[EXL == 1] delivering TLB MISS @ pc %#lx\n",
1792 arch->pc);
1793
1794 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1795 }
1796
1797 kvm_change_c0_guest_cause(cop0, (0xff),
1798 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1799
1800 /* setup badvaddr, context and entryhi registers for the guest */
1801 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1802 /* XXXKYMA: is the context register used by linux??? */
1803 kvm_write_c0_guest_entryhi(cop0, entryhi);
1804 /* Blow away the shadow host TLBs */
1805 kvm_mips_flush_host_tlb(1);
1806
Deng-Cheng Zhud98403a2014-06-26 12:11:36 -07001807 return EMULATE_DONE;
Sanjay Lale685c682012-11-21 18:34:04 -08001808}
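
/*
 * Worked example (illustrative, assuming VPN2_MASK == 0xffffe000, i.e.
 * 4 KiB pages): for a guest badvaddr of 0x00402abc and guest ASID 5,
 * the EntryHi written above is (0x00402abc & 0xffffe000) | 5 ==
 * 0x00402005, the faulting page-pair's VPN2 plus the current ASID.
 */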
1809
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001810enum emulation_result kvm_mips_emulate_tlbinv_ld(unsigned long cause,
1811 uint32_t *opc,
1812 struct kvm_run *run,
1813 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001814{
1815 struct mips_coproc *cop0 = vcpu->arch.cop0;
1816 struct kvm_vcpu_arch *arch = &vcpu->arch;
Sanjay Lale685c682012-11-21 18:34:04 -08001817 unsigned long entryhi =
1818 (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001819 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001820
1821 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1822 /* save old pc */
1823 kvm_write_c0_guest_epc(cop0, arch->pc);
1824 kvm_set_c0_guest_status(cop0, ST0_EXL);
1825
1826 if (cause & CAUSEF_BD)
1827 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1828 else
1829 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1830
1831 kvm_debug("[EXL == 0] delivering TLB INV @ pc %#lx\n",
1832 arch->pc);
1833
1834 /* set pc to the exception entry point */
1835 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1836
1837 } else {
1838		kvm_debug("[EXL == 1] delivering TLB INV @ pc %#lx\n",
1839 arch->pc);
1840 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1841 }
1842
1843 kvm_change_c0_guest_cause(cop0, (0xff),
1844 (T_TLB_LD_MISS << CAUSEB_EXCCODE));
1845
1846 /* setup badvaddr, context and entryhi registers for the guest */
1847 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1848 /* XXXKYMA: is the context register used by linux??? */
1849 kvm_write_c0_guest_entryhi(cop0, entryhi);
1850 /* Blow away the shadow host TLBs */
1851 kvm_mips_flush_host_tlb(1);
1852
Deng-Cheng Zhud98403a2014-06-26 12:11:36 -07001853 return EMULATE_DONE;
Sanjay Lale685c682012-11-21 18:34:04 -08001854}
1855
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001856enum emulation_result kvm_mips_emulate_tlbmiss_st(unsigned long cause,
1857 uint32_t *opc,
1858 struct kvm_run *run,
1859 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001860{
1861 struct mips_coproc *cop0 = vcpu->arch.cop0;
1862 struct kvm_vcpu_arch *arch = &vcpu->arch;
Sanjay Lale685c682012-11-21 18:34:04 -08001863 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001864 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001865
1866 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1867 /* save old pc */
1868 kvm_write_c0_guest_epc(cop0, arch->pc);
1869 kvm_set_c0_guest_status(cop0, ST0_EXL);
1870
1871 if (cause & CAUSEF_BD)
1872 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1873 else
1874 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1875
1876 kvm_debug("[EXL == 0] Delivering TLB MISS @ pc %#lx\n",
1877 arch->pc);
1878
1879 /* Set PC to the exception entry point */
1880 arch->pc = KVM_GUEST_KSEG0 + 0x0;
1881 } else {
1882 kvm_debug("[EXL == 1] Delivering TLB MISS @ pc %#lx\n",
1883 arch->pc);
1884 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1885 }
1886
1887 kvm_change_c0_guest_cause(cop0, (0xff),
1888 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1889
1890 /* setup badvaddr, context and entryhi registers for the guest */
1891 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1892 /* XXXKYMA: is the context register used by linux??? */
1893 kvm_write_c0_guest_entryhi(cop0, entryhi);
1894 /* Blow away the shadow host TLBs */
1895 kvm_mips_flush_host_tlb(1);
1896
Deng-Cheng Zhud98403a2014-06-26 12:11:36 -07001897 return EMULATE_DONE;
Sanjay Lale685c682012-11-21 18:34:04 -08001898}
1899
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001900enum emulation_result kvm_mips_emulate_tlbinv_st(unsigned long cause,
1901 uint32_t *opc,
1902 struct kvm_run *run,
1903 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001904{
1905 struct mips_coproc *cop0 = vcpu->arch.cop0;
1906 struct kvm_vcpu_arch *arch = &vcpu->arch;
Sanjay Lale685c682012-11-21 18:34:04 -08001907 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001908 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001909
1910 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1911 /* save old pc */
1912 kvm_write_c0_guest_epc(cop0, arch->pc);
1913 kvm_set_c0_guest_status(cop0, ST0_EXL);
1914
1915 if (cause & CAUSEF_BD)
1916 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1917 else
1918 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1919
1920		kvm_debug("[EXL == 0] Delivering TLB INV @ pc %#lx\n",
1921 arch->pc);
1922
1923 /* Set PC to the exception entry point */
1924 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1925 } else {
1926		kvm_debug("[EXL == 1] Delivering TLB INV @ pc %#lx\n",
1927 arch->pc);
1928 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1929 }
1930
1931 kvm_change_c0_guest_cause(cop0, (0xff),
1932 (T_TLB_ST_MISS << CAUSEB_EXCCODE));
1933
1934 /* setup badvaddr, context and entryhi registers for the guest */
1935 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
1936 /* XXXKYMA: is the context register used by linux??? */
1937 kvm_write_c0_guest_entryhi(cop0, entryhi);
1938 /* Blow away the shadow host TLBs */
1939 kvm_mips_flush_host_tlb(1);
1940
Deng-Cheng Zhud98403a2014-06-26 12:11:36 -07001941 return EMULATE_DONE;
Sanjay Lale685c682012-11-21 18:34:04 -08001942}
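
/*
 * The four TLB refill/invalid helpers above share one delivery
 * pattern.  A condensed sketch of that common core follows; it is a
 * hypothetical helper for illustration only (hence #if 0), built from
 * the same guest-CP0 accessors the code above uses.
 */
#if 0
static void deliver_guest_exc_sketch(struct kvm_vcpu_arch *arch,
				     struct mips_coproc *cop0,
				     unsigned long cause,
				     unsigned long exl0_offset)
{
	if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
		/* Not already in exception mode: save EPC, set EXL/BD */
		kvm_write_c0_guest_epc(cop0, arch->pc);
		kvm_set_c0_guest_status(cop0, ST0_EXL);
		if (cause & CAUSEF_BD)
			kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
		else
			kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
		/* 0x0 for the TLB refill vector, 0x180 otherwise */
		arch->pc = KVM_GUEST_KSEG0 + exl0_offset;
	} else {
		/* Nested exception: always the general vector */
		arch->pc = KVM_GUEST_KSEG0 + 0x180;
	}
}
#endif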
1943
1944/* TLBMOD: store into address matching TLB with Dirty bit off */
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001945enum emulation_result kvm_mips_handle_tlbmod(unsigned long cause, uint32_t *opc,
1946 struct kvm_run *run,
1947 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001948{
1949 enum emulation_result er = EMULATE_DONE;
Sanjay Lale685c682012-11-21 18:34:04 -08001950#ifdef DEBUG
James Hogan3d654832014-05-29 10:16:41 +01001951 struct mips_coproc *cop0 = vcpu->arch.cop0;
1952 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
1953 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
1954 int index;
1955
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001956 /* If address not in the guest TLB, then we are in trouble */
Sanjay Lale685c682012-11-21 18:34:04 -08001957 index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);
1958 if (index < 0) {
1959 /* XXXKYMA Invalidate and retry */
1960 kvm_mips_host_tlb_inv(vcpu, vcpu->arch.host_cp0_badvaddr);
1961 kvm_err("%s: host got TLBMOD for %#lx but entry not present in Guest TLB\n",
1962 __func__, entryhi);
1963 kvm_mips_dump_guest_tlbs(vcpu);
1964 kvm_mips_dump_host_tlbs();
1965 return EMULATE_FAIL;
1966 }
1967#endif
1968
1969 er = kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
1970 return er;
1971}
1972
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07001973enum emulation_result kvm_mips_emulate_tlbmod(unsigned long cause,
1974 uint32_t *opc,
1975 struct kvm_run *run,
1976 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08001977{
1978 struct mips_coproc *cop0 = vcpu->arch.cop0;
1979 unsigned long entryhi = (vcpu->arch.host_cp0_badvaddr & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07001980 (kvm_read_c0_guest_entryhi(cop0) & ASID_MASK);
Sanjay Lale685c682012-11-21 18:34:04 -08001981 struct kvm_vcpu_arch *arch = &vcpu->arch;
Sanjay Lale685c682012-11-21 18:34:04 -08001982
1983 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
1984 /* save old pc */
1985 kvm_write_c0_guest_epc(cop0, arch->pc);
1986 kvm_set_c0_guest_status(cop0, ST0_EXL);
1987
1988 if (cause & CAUSEF_BD)
1989 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
1990 else
1991 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
1992
1993 kvm_debug("[EXL == 0] Delivering TLB MOD @ pc %#lx\n",
1994 arch->pc);
1995
1996 arch->pc = KVM_GUEST_KSEG0 + 0x180;
1997 } else {
1998 kvm_debug("[EXL == 1] Delivering TLB MOD @ pc %#lx\n",
1999 arch->pc);
2000 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2001 }
2002
2003 kvm_change_c0_guest_cause(cop0, (0xff), (T_TLB_MOD << CAUSEB_EXCCODE));
2004
2005 /* setup badvaddr, context and entryhi registers for the guest */
2006 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2007 /* XXXKYMA: is the context register used by linux??? */
2008 kvm_write_c0_guest_entryhi(cop0, entryhi);
2009 /* Blow away the shadow host TLBs */
2010 kvm_mips_flush_host_tlb(1);
2011
Deng-Cheng Zhud98403a2014-06-26 12:11:36 -07002012 return EMULATE_DONE;
Sanjay Lale685c682012-11-21 18:34:04 -08002013}
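
/*
 * Recap (illustrative): TLB Mod is the store-to-clean-page case; the
 * guest mapping is valid (V set) but not writable (D clear), so the
 * exception delivered above lets the guest kernel mark the page dirty
 * and rewrite the entry with D set.
 */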
2014
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002015enum emulation_result kvm_mips_emulate_fpu_exc(unsigned long cause,
2016 uint32_t *opc,
2017 struct kvm_run *run,
2018 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002019{
2020 struct mips_coproc *cop0 = vcpu->arch.cop0;
2021 struct kvm_vcpu_arch *arch = &vcpu->arch;
Sanjay Lale685c682012-11-21 18:34:04 -08002022
2023 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2024 /* save old pc */
2025 kvm_write_c0_guest_epc(cop0, arch->pc);
2026 kvm_set_c0_guest_status(cop0, ST0_EXL);
2027
2028 if (cause & CAUSEF_BD)
2029 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2030 else
2031 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2032
2033 }
2034
2035 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2036
2037 kvm_change_c0_guest_cause(cop0, (0xff),
2038 (T_COP_UNUSABLE << CAUSEB_EXCCODE));
2039 kvm_change_c0_guest_cause(cop0, (CAUSEF_CE), (0x1 << CAUSEB_CE));
2040
Deng-Cheng Zhud98403a2014-06-26 12:11:36 -07002041 return EMULATE_DONE;
Sanjay Lale685c682012-11-21 18:34:04 -08002042}
2043
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002044enum emulation_result kvm_mips_emulate_ri_exc(unsigned long cause,
2045 uint32_t *opc,
2046 struct kvm_run *run,
2047 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002048{
2049 struct mips_coproc *cop0 = vcpu->arch.cop0;
2050 struct kvm_vcpu_arch *arch = &vcpu->arch;
2051 enum emulation_result er = EMULATE_DONE;
2052
2053 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2054 /* save old pc */
2055 kvm_write_c0_guest_epc(cop0, arch->pc);
2056 kvm_set_c0_guest_status(cop0, ST0_EXL);
2057
2058 if (cause & CAUSEF_BD)
2059 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2060 else
2061 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2062
2063 kvm_debug("Delivering RI @ pc %#lx\n", arch->pc);
2064
2065 kvm_change_c0_guest_cause(cop0, (0xff),
2066 (T_RES_INST << CAUSEB_EXCCODE));
2067
2068 /* Set PC to the exception entry point */
2069 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2070
2071 } else {
2072 kvm_err("Trying to deliver RI when EXL is already set\n");
2073 er = EMULATE_FAIL;
2074 }
2075
2076 return er;
2077}
2078
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002079enum emulation_result kvm_mips_emulate_bp_exc(unsigned long cause,
2080 uint32_t *opc,
2081 struct kvm_run *run,
2082 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002083{
2084 struct mips_coproc *cop0 = vcpu->arch.cop0;
2085 struct kvm_vcpu_arch *arch = &vcpu->arch;
2086 enum emulation_result er = EMULATE_DONE;
2087
2088 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2089 /* save old pc */
2090 kvm_write_c0_guest_epc(cop0, arch->pc);
2091 kvm_set_c0_guest_status(cop0, ST0_EXL);
2092
2093 if (cause & CAUSEF_BD)
2094 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2095 else
2096 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2097
2098 kvm_debug("Delivering BP @ pc %#lx\n", arch->pc);
2099
2100 kvm_change_c0_guest_cause(cop0, (0xff),
2101 (T_BREAK << CAUSEB_EXCCODE));
2102
2103 /* Set PC to the exception entry point */
2104 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2105
2106 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002107 kvm_err("Trying to deliver BP when EXL is already set\n");
Sanjay Lale685c682012-11-21 18:34:04 -08002108 er = EMULATE_FAIL;
2109 }
2110
2111 return er;
2112}
2113
James Hogan0a560422015-02-06 16:03:57 +00002114enum emulation_result kvm_mips_emulate_trap_exc(unsigned long cause,
2115 uint32_t *opc,
2116 struct kvm_run *run,
2117 struct kvm_vcpu *vcpu)
2118{
2119 struct mips_coproc *cop0 = vcpu->arch.cop0;
2120 struct kvm_vcpu_arch *arch = &vcpu->arch;
2121 enum emulation_result er = EMULATE_DONE;
2122
2123 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2124 /* save old pc */
2125 kvm_write_c0_guest_epc(cop0, arch->pc);
2126 kvm_set_c0_guest_status(cop0, ST0_EXL);
2127
2128 if (cause & CAUSEF_BD)
2129 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2130 else
2131 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2132
2133 kvm_debug("Delivering TRAP @ pc %#lx\n", arch->pc);
2134
2135 kvm_change_c0_guest_cause(cop0, (0xff),
2136 (T_TRAP << CAUSEB_EXCCODE));
2137
2138 /* Set PC to the exception entry point */
2139 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2140
2141 } else {
2142 kvm_err("Trying to deliver TRAP when EXL is already set\n");
2143 er = EMULATE_FAIL;
2144 }
2145
2146 return er;
2147}
2148
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002149/* ll/sc, rdhwr, sync emulation */
Sanjay Lale685c682012-11-21 18:34:04 -08002150
2151#define OPCODE 0xfc000000
2152#define BASE 0x03e00000
2153#define RT 0x001f0000
2154#define OFFSET 0x0000ffff
2155#define LL 0xc0000000
2156#define SC 0xe0000000
2157#define SPEC0 0x00000000
2158#define SPEC3 0x7c000000
2159#define RD 0x0000f800
2160#define FUNC 0x0000003f
2161#define SYNC 0x0000000f
2162#define RDHWR 0x0000003b
2163
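/*
 * Worked example (illustrative): "rdhwr $3, $29" assembles to
 * 0x7c03e83b.  (0x7c03e83b & OPCODE) == SPEC3 and
 * (0x7c03e83b & FUNC) == RDHWR, so the handler below accepts it,
 * with rd = (inst & RD) >> 11 == 29 (UserLocal) and
 * rt = (inst & RT) >> 16 == 3.
 */
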
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002164enum emulation_result kvm_mips_handle_ri(unsigned long cause, uint32_t *opc,
2165 struct kvm_run *run,
2166 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002167{
2168 struct mips_coproc *cop0 = vcpu->arch.cop0;
2169 struct kvm_vcpu_arch *arch = &vcpu->arch;
2170 enum emulation_result er = EMULATE_DONE;
2171 unsigned long curr_pc;
2172 uint32_t inst;
2173
2174 /*
2175 * Update PC and hold onto current PC in case there is
2176 * an error and we want to rollback the PC
2177 */
2178 curr_pc = vcpu->arch.pc;
2179 er = update_pc(vcpu, cause);
2180 if (er == EMULATE_FAIL)
2181 return er;
2182
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002183 /* Fetch the instruction. */
Sanjay Lale685c682012-11-21 18:34:04 -08002184 if (cause & CAUSEF_BD)
2185 opc += 1;
2186
2187 inst = kvm_get_inst(opc, vcpu);
2188
2189 if (inst == KVM_INVALID_INST) {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002190 kvm_err("%s: Cannot get inst @ %p\n", __func__, opc);
Sanjay Lale685c682012-11-21 18:34:04 -08002191 return EMULATE_FAIL;
2192 }
2193
2194 if ((inst & OPCODE) == SPEC3 && (inst & FUNC) == RDHWR) {
James Hogan26f4f3b2014-03-14 13:06:09 +00002195 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
Sanjay Lale685c682012-11-21 18:34:04 -08002196 int rd = (inst & RD) >> 11;
2197 int rt = (inst & RT) >> 16;
James Hogan26f4f3b2014-03-14 13:06:09 +00002198 /* If usermode, check RDHWR rd is allowed by guest HWREna */
2199 if (usermode && !(kvm_read_c0_guest_hwrena(cop0) & BIT(rd))) {
2200 kvm_debug("RDHWR %#x disallowed by HWREna @ %p\n",
2201 rd, opc);
2202 goto emulate_ri;
2203 }
Sanjay Lale685c682012-11-21 18:34:04 -08002204 switch (rd) {
2205 case 0: /* CPU number */
2206 arch->gprs[rt] = 0;
2207 break;
2208 case 1: /* SYNCI length */
2209 arch->gprs[rt] = min(current_cpu_data.dcache.linesz,
2210 current_cpu_data.icache.linesz);
2211 break;
2212 case 2: /* Read count register */
James Hogane30492b2014-05-29 10:16:35 +01002213 arch->gprs[rt] = kvm_mips_read_count(vcpu);
Sanjay Lale685c682012-11-21 18:34:04 -08002214 break;
2215 case 3: /* Count register resolution */
2216 switch (current_cpu_data.cputype) {
2217 case CPU_20KC:
2218 case CPU_25KF:
2219 arch->gprs[rt] = 1;
2220 break;
2221 default:
2222 arch->gprs[rt] = 2;
2223 }
2224 break;
2225 case 29:
Sanjay Lale685c682012-11-21 18:34:04 -08002226 arch->gprs[rt] = kvm_read_c0_guest_userlocal(cop0);
Sanjay Lale685c682012-11-21 18:34:04 -08002227 break;
2228
2229 default:
James Hogan15505672014-03-14 13:06:07 +00002230 kvm_debug("RDHWR %#x not supported @ %p\n", rd, opc);
James Hogan26f4f3b2014-03-14 13:06:09 +00002231 goto emulate_ri;
Sanjay Lale685c682012-11-21 18:34:04 -08002232 }
2233 } else {
James Hogan15505672014-03-14 13:06:07 +00002234 kvm_debug("Emulate RI not supported @ %p: %#x\n", opc, inst);
James Hogan26f4f3b2014-03-14 13:06:09 +00002235 goto emulate_ri;
Sanjay Lale685c682012-11-21 18:34:04 -08002236 }
2237
James Hogan26f4f3b2014-03-14 13:06:09 +00002238 return EMULATE_DONE;
2239
2240emulate_ri:
Sanjay Lale685c682012-11-21 18:34:04 -08002241 /*
James Hogan26f4f3b2014-03-14 13:06:09 +00002242 * Rollback PC (if in branch delay slot then the PC already points to
2243 * branch target), and pass the RI exception to the guest OS.
Sanjay Lale685c682012-11-21 18:34:04 -08002244 */
James Hogan26f4f3b2014-03-14 13:06:09 +00002245 vcpu->arch.pc = curr_pc;
2246 return kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
Sanjay Lale685c682012-11-21 18:34:04 -08002247}
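
/*
 * Usage note (illustrative): the guest gates usermode RDHWR through
 * its HWREna register; a Linux guest typically sets bit 29
 * (BIT(29) == 0x20000000) so that TLS reads of UserLocal via
 * "rdhwr $3, $29" pass the BIT(rd) check above instead of being
 * reflected back as a reserved instruction exception.
 */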
2248
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002249enum emulation_result kvm_mips_complete_mmio_load(struct kvm_vcpu *vcpu,
2250 struct kvm_run *run)
Sanjay Lale685c682012-11-21 18:34:04 -08002251{
2252 unsigned long *gpr = &vcpu->arch.gprs[vcpu->arch.io_gpr];
2253 enum emulation_result er = EMULATE_DONE;
2254 unsigned long curr_pc;
2255
2256 if (run->mmio.len > sizeof(*gpr)) {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002257		kvm_err("Bad MMIO length: %d\n", run->mmio.len);
Sanjay Lale685c682012-11-21 18:34:04 -08002258 er = EMULATE_FAIL;
2259 goto done;
2260 }
2261
2262 /*
2263 * Update PC and hold onto current PC in case there is
2264 * an error and we want to rollback the PC
2265 */
2266 curr_pc = vcpu->arch.pc;
2267 er = update_pc(vcpu, vcpu->arch.pending_load_cause);
2268 if (er == EMULATE_FAIL)
2269 return er;
2270
2271 switch (run->mmio.len) {
2272 case 4:
2273 *gpr = *(int32_t *) run->mmio.data;
2274 break;
2275
2276 case 2:
2277 if (vcpu->mmio_needed == 2)
2278 *gpr = *(int16_t *) run->mmio.data;
2279 else
2280			*gpr = *(uint16_t *) run->mmio.data;
2281
2282 break;
2283 case 1:
2284 if (vcpu->mmio_needed == 2)
2285 *gpr = *(int8_t *) run->mmio.data;
2286 else
2287 *gpr = *(u8 *) run->mmio.data;
2288 break;
2289 }
2290
2291 if (vcpu->arch.pending_load_cause & CAUSEF_BD)
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002292 kvm_debug("[%#lx] Completing %d byte BD Load to gpr %d (0x%08lx) type %d\n",
2293 vcpu->arch.pc, run->mmio.len, vcpu->arch.io_gpr, *gpr,
2294 vcpu->mmio_needed);
Sanjay Lale685c682012-11-21 18:34:04 -08002295
2296done:
2297 return er;
2298}
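
/*
 * Illustrative: for a 2-byte MMIO read returning 0xffff, the signed
 * cast (lh, mmio_needed == 2) yields *gpr == (unsigned long)-1, while
 * the unsigned cast (lhu) must yield 0x0000ffff; hence the distinct
 * casts per vcpu->mmio_needed above.
 */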
2299
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002300static enum emulation_result kvm_mips_emulate_exc(unsigned long cause,
2301 uint32_t *opc,
2302 struct kvm_run *run,
2303 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002304{
2305 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2306 struct mips_coproc *cop0 = vcpu->arch.cop0;
2307 struct kvm_vcpu_arch *arch = &vcpu->arch;
2308 enum emulation_result er = EMULATE_DONE;
2309
2310 if ((kvm_read_c0_guest_status(cop0) & ST0_EXL) == 0) {
2311 /* save old pc */
2312 kvm_write_c0_guest_epc(cop0, arch->pc);
2313 kvm_set_c0_guest_status(cop0, ST0_EXL);
2314
2315 if (cause & CAUSEF_BD)
2316 kvm_set_c0_guest_cause(cop0, CAUSEF_BD);
2317 else
2318 kvm_clear_c0_guest_cause(cop0, CAUSEF_BD);
2319
2320 kvm_change_c0_guest_cause(cop0, (0xff),
2321 (exccode << CAUSEB_EXCCODE));
2322
2323 /* Set PC to the exception entry point */
2324 arch->pc = KVM_GUEST_KSEG0 + 0x180;
2325 kvm_write_c0_guest_badvaddr(cop0, vcpu->arch.host_cp0_badvaddr);
2326
2327 kvm_debug("Delivering EXC %d @ pc %#lx, badVaddr: %#lx\n",
2328 exccode, kvm_read_c0_guest_epc(cop0),
2329 kvm_read_c0_guest_badvaddr(cop0));
2330 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002331 kvm_err("Trying to deliver EXC when EXL is already set\n");
Sanjay Lale685c682012-11-21 18:34:04 -08002332 er = EMULATE_FAIL;
2333 }
2334
2335 return er;
2336}
2337
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002338enum emulation_result kvm_mips_check_privilege(unsigned long cause,
2339 uint32_t *opc,
2340 struct kvm_run *run,
2341 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002342{
2343 enum emulation_result er = EMULATE_DONE;
2344 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2345 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
2346
2347 int usermode = !KVM_GUEST_KERNEL_MODE(vcpu);
2348
2349 if (usermode) {
2350 switch (exccode) {
2351 case T_INT:
2352 case T_SYSCALL:
2353 case T_BREAK:
2354 case T_RES_INST:
James Hogan0a560422015-02-06 16:03:57 +00002355 case T_TRAP:
James Hogan98119ad2015-02-06 11:11:56 +00002356 case T_MSADIS:
Sanjay Lale685c682012-11-21 18:34:04 -08002357 break;
2358
2359 case T_COP_UNUSABLE:
2360 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 0)
2361 er = EMULATE_PRIV_FAIL;
2362 break;
2363
2364 case T_TLB_MOD:
2365 break;
2366
2367 case T_TLB_LD_MISS:
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002368 /*
2369			 * If we are accessing guest kernel space, then send an
2370 * address error exception to the guest
2371 */
Sanjay Lale685c682012-11-21 18:34:04 -08002372 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002373 kvm_debug("%s: LD MISS @ %#lx\n", __func__,
2374 badvaddr);
Sanjay Lale685c682012-11-21 18:34:04 -08002375 cause &= ~0xff;
2376 cause |= (T_ADDR_ERR_LD << CAUSEB_EXCCODE);
2377 er = EMULATE_PRIV_FAIL;
2378 }
2379 break;
2380
2381 case T_TLB_ST_MISS:
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002382 /*
2383			 * If we are accessing guest kernel space, then send an
2384 * address error exception to the guest
2385 */
Sanjay Lale685c682012-11-21 18:34:04 -08002386 if (badvaddr >= (unsigned long) KVM_GUEST_KSEG0) {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002387 kvm_debug("%s: ST MISS @ %#lx\n", __func__,
2388 badvaddr);
Sanjay Lale685c682012-11-21 18:34:04 -08002389 cause &= ~0xff;
2390 cause |= (T_ADDR_ERR_ST << CAUSEB_EXCCODE);
2391 er = EMULATE_PRIV_FAIL;
2392 }
2393 break;
2394
2395 case T_ADDR_ERR_ST:
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002396 kvm_debug("%s: address error ST @ %#lx\n", __func__,
2397 badvaddr);
Sanjay Lale685c682012-11-21 18:34:04 -08002398 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2399 cause &= ~0xff;
2400 cause |= (T_TLB_ST_MISS << CAUSEB_EXCCODE);
2401 }
2402 er = EMULATE_PRIV_FAIL;
2403 break;
2404 case T_ADDR_ERR_LD:
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002405 kvm_debug("%s: address error LD @ %#lx\n", __func__,
2406 badvaddr);
Sanjay Lale685c682012-11-21 18:34:04 -08002407 if ((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR) {
2408 cause &= ~0xff;
2409 cause |= (T_TLB_LD_MISS << CAUSEB_EXCCODE);
2410 }
2411 er = EMULATE_PRIV_FAIL;
2412 break;
2413 default:
2414 er = EMULATE_PRIV_FAIL;
2415 break;
2416 }
2417 }
2418
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002419 if (er == EMULATE_PRIV_FAIL)
Sanjay Lale685c682012-11-21 18:34:04 -08002420 kvm_mips_emulate_exc(cause, opc, run, vcpu);
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002421
Sanjay Lale685c682012-11-21 18:34:04 -08002422 return er;
2423}
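
/*
 * Worked example (illustrative): with ExcCode in Cause[6:2], a cause
 * value whose low byte is 0x20 decodes as
 * exccode == (0x20 >> 2) & 0x1f == 8 (Sys), which user mode may
 * legitimately raise, so the switch above leaves er == EMULATE_DONE
 * and the syscall is delivered to the guest kernel.
 */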
2424
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002425/*
2426 * User Address (UA) fault; this can happen if:
Sanjay Lale685c682012-11-21 18:34:04 -08002427 * (1) TLB entry not present/valid in both Guest and shadow host TLBs, in this
2428 * case we pass on the fault to the guest kernel and let it handle it.
2429 * (2) TLB entry is present in the Guest TLB but not in the shadow, in this
2430 * case we inject the TLB from the Guest TLB into the shadow host TLB
2431 */
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002432enum emulation_result kvm_mips_handle_tlbmiss(unsigned long cause,
2433 uint32_t *opc,
2434 struct kvm_run *run,
2435 struct kvm_vcpu *vcpu)
Sanjay Lale685c682012-11-21 18:34:04 -08002436{
2437 enum emulation_result er = EMULATE_DONE;
2438 uint32_t exccode = (cause >> CAUSEB_EXCCODE) & 0x1f;
2439 unsigned long va = vcpu->arch.host_cp0_badvaddr;
2440 int index;
2441
2442 kvm_debug("kvm_mips_handle_tlbmiss: badvaddr: %#lx, entryhi: %#lx\n",
2443 vcpu->arch.host_cp0_badvaddr, vcpu->arch.host_cp0_entryhi);
2444
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002445 /*
2446 * KVM would not have got the exception if this entry was valid in the
2447 * shadow host TLB. Check the Guest TLB; if the entry is not there,
2448 * send the guest an exception. The guest exception handler should then inject
2449 * an entry into the guest TLB.
Sanjay Lale685c682012-11-21 18:34:04 -08002450 */
2451 index = kvm_mips_guest_tlb_lookup(vcpu,
2452 (va & VPN2_MASK) |
David Daney48c4ac92013-05-13 13:56:44 -07002453 (kvm_read_c0_guest_entryhi
2454 (vcpu->arch.cop0) & ASID_MASK));
Sanjay Lale685c682012-11-21 18:34:04 -08002455 if (index < 0) {
2456 if (exccode == T_TLB_LD_MISS) {
2457 er = kvm_mips_emulate_tlbmiss_ld(cause, opc, run, vcpu);
2458 } else if (exccode == T_TLB_ST_MISS) {
2459 er = kvm_mips_emulate_tlbmiss_st(cause, opc, run, vcpu);
2460 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002461 kvm_err("%s: invalid exc code: %d\n", __func__,
2462 exccode);
Sanjay Lale685c682012-11-21 18:34:04 -08002463 er = EMULATE_FAIL;
2464 }
2465 } else {
2466 struct kvm_mips_tlb *tlb = &vcpu->arch.guest_tlb[index];
2467
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002468 /*
2469		 * Check if the entry is valid; if not, set up a TLB invalid
2470		 * exception to the guest
2471 */
Sanjay Lale685c682012-11-21 18:34:04 -08002472 if (!TLB_IS_VALID(*tlb, va)) {
2473 if (exccode == T_TLB_LD_MISS) {
2474 er = kvm_mips_emulate_tlbinv_ld(cause, opc, run,
2475 vcpu);
2476 } else if (exccode == T_TLB_ST_MISS) {
2477 er = kvm_mips_emulate_tlbinv_st(cause, opc, run,
2478 vcpu);
2479 } else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -07002480 kvm_err("%s: invalid exc code: %d\n", __func__,
2481 exccode);
Sanjay Lale685c682012-11-21 18:34:04 -08002482 er = EMULATE_FAIL;
2483 }
2484 } else {
Deng-Cheng Zhud116e812014-06-26 12:11:34 -07002485 kvm_debug("Injecting hi: %#lx, lo0: %#lx, lo1: %#lx into shadow host TLB\n",
2486 tlb->tlb_hi, tlb->tlb_lo0, tlb->tlb_lo1);
2487 /*
2488 * OK we have a Guest TLB entry, now inject it into the
2489 * shadow host TLB
2490 */
Sanjay Lale685c682012-11-21 18:34:04 -08002491 kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, NULL,
2492 NULL);
2493 }
2494 }
2495
2496 return er;
2497}
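
/*
 * Condensed flow of kvm_mips_handle_tlbmiss() above (illustration
 * only):
 *
 *	guest TLB lookup on (VPN2 | ASID):
 *		miss        -> deliver TLB refill exception to the guest
 *		hit, V == 0 -> deliver TLB invalid exception to the guest
 *		hit, V == 1 -> inject the entry into the shadow host TLB
 */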