1/*
2 * This file is subject to the terms and conditions of the GNU General Public
3 * License. See the file "COPYING" in the main directory of this archive
4 * for more details.
5 *
6 * KVM/MIPS: Support for hardware virtualization extensions
7 *
8 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
9 * Authors: Yann Le Du <ledu@kymasys.com>
10 */
11
12#include <linux/errno.h>
13#include <linux/err.h>
14#include <linux/module.h>
15#include <linux/preempt.h>
16#include <linux/vmalloc.h>
17#include <asm/cacheflush.h>
18#include <asm/cacheops.h>
19#include <asm/cmpxchg.h>
20#include <asm/fpu.h>
21#include <asm/hazards.h>
22#include <asm/inst.h>
23#include <asm/mmu_context.h>
24#include <asm/r4kcache.h>
25#include <asm/time.h>
26#include <asm/tlb.h>
27#include <asm/tlbex.h>
28
29#include <linux/kvm_host.h>
30
31#include "interrupt.h"
32
33#include "trace.h"
34
35/* Pointers to last VCPU loaded on each physical CPU */
36static struct kvm_vcpu *last_vcpu[NR_CPUS];
37/* Pointers to last VCPU executed on each physical CPU */
38static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];
39
40/*
41 * Number of guest VTLB entries to use, so we can catch inconsistency between
42 * CPUs.
43 */
44static unsigned int kvm_vz_guest_vtlb_size;
45
46static inline long kvm_vz_read_gc0_ebase(void)
47{
48 if (sizeof(long) == 8 && cpu_has_ebase_wg)
49 return read_gc0_ebase_64();
50 else
51 return read_gc0_ebase();
52}
53
54static inline void kvm_vz_write_gc0_ebase(long v)
55{
56 /*
57 * First write with WG=1 to write upper bits, then write again in case
58 * WG should be left at 0.
59 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
60 */
61 if (sizeof(long) == 8 &&
62 (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
63 write_gc0_ebase_64(v | MIPS_EBASE_WG);
64 write_gc0_ebase_64(v);
65 } else {
66 write_gc0_ebase(v | MIPS_EBASE_WG);
67 write_gc0_ebase(v);
68 }
69}
70
71/*
72 * These Config bits may be writable by the guest:
73 * Config: [K23, KU] (!TLB), K0
74 * Config1: (none)
75 * Config2: [TU, SU] (impl)
76 * Config3: ISAOnExc
77 * Config4: FTLBPageSize
78 * Config5: K, CV, MSAEn, UFE, FRE, SBRI, UFR
79 */
80
81static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
82{
83 return CONF_CM_CMASK;
84}
85
86static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
87{
88 return 0;
89}
90
91static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
92{
93 return 0;
94}
95
96static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
97{
98 return MIPS_CONF3_ISA_OE;
99}
100
101static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
102{
103 /* no need to be exact */
104 return MIPS_CONF4_VFTLBPAGESIZE;
105}
106
107static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
108{
109 unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;
110
111 /* Permit MSAEn changes if MSA supported and enabled */
112 if (kvm_mips_guest_has_msa(&vcpu->arch))
113 mask |= MIPS_CONF5_MSAEN;
114
115 /*
116 * Permit guest FPU mode changes if FPU is enabled and the relevant
117 * feature exists according to FIR register.
118 */
119 if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
120 if (cpu_has_ufr)
121 mask |= MIPS_CONF5_UFR;
122 if (cpu_has_fre)
123 mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
124 }
125
126 return mask;
127}
128
129/*
130 * VZ optionally allows these additional Config bits to be written by root:
131 * Config: M, [MT]
132 * Config1: M, [MMUSize-1, C2, MD, PC, WR, CA], FP
133 * Config2: M
134 * Config3: M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
135 *	    VInt, SP, CDMM, MT, SM, TL]
136 * Config4: M, [VTLBSizeExt, MMUSizeExt]
137 * Config5: MRP
138 */
139
140static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
141{
142 return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
143}
144
145static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
146{
147 unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;
148
149 /* Permit FPU to be present if FPU is supported */
150 if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
151 mask |= MIPS_CONF1_FP;
152
153 return mask;
154}
155
156static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
157{
158 return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
159}
160
161static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
162{
163 unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
164 MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;
165
166 /* Permit MSA to be present if MSA is supported */
167 if (kvm_mips_guest_can_have_msa(&vcpu->arch))
168 mask |= MIPS_CONF3_MSA;
169
170 return mask;
171}
172
173static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
174{
175 return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
176}
177
178static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
179{
180 return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
181}
182
183static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
184{
185 /* VZ guest has already converted gva to gpa */
186 return gva;
187}
188
189static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
190{
191 set_bit(priority, &vcpu->arch.pending_exceptions);
192 clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
193}
194
195static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
196{
197 clear_bit(priority, &vcpu->arch.pending_exceptions);
198 set_bit(priority, &vcpu->arch.pending_exceptions_clr);
199}
200
201static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
202{
203 /*
204 * timer expiry is asynchronous to vcpu execution therefore defer guest
205 * cp0 accesses
206 */
207 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
208}
209
210static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
211{
212 /*
213 * timer expiry is asynchronous to vcpu execution therefore defer guest
214 * cp0 accesses
215 */
216 kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
217}
218
219static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
220 struct kvm_mips_interrupt *irq)
221{
222 int intr = (int)irq->irq;
223
224 /*
225 * interrupts are asynchronous to vcpu execution therefore defer guest
226 * cp0 accesses
227 */
228 switch (intr) {
229 case 2:
230 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
231 break;
232
233 case 3:
234 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
235 break;
236
237 case 4:
238 kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
239 break;
240
241 default:
242 break;
243 }
244
245}
246
247static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
248 struct kvm_mips_interrupt *irq)
249{
250 int intr = (int)irq->irq;
251
252 /*
253 * interrupts are asynchronous to vcpu execution therefore defer guest
254 * cp0 accesses
255 */
256 switch (intr) {
257 case -2:
258 kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
259 break;
260
261 case -3:
262 kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
263 break;
264
265 case -4:
266 kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
267 break;
268
269 default:
270 break;
271 }
272
273}
274
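/* C_IRQ0..C_IRQ5 correspond to the Cause.IP2..IP7 hardware interrupt request bits */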
275static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
276 [MIPS_EXC_INT_TIMER] = C_IRQ5,
277 [MIPS_EXC_INT_IO] = C_IRQ0,
278 [MIPS_EXC_INT_IPI_1] = C_IRQ1,
279 [MIPS_EXC_INT_IPI_2] = C_IRQ2,
280};
281
282static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
283 u32 cause)
284{
285 u32 irq = (priority < MIPS_EXC_MAX) ?
286 kvm_vz_priority_to_irq[priority] : 0;
287
288 switch (priority) {
289 case MIPS_EXC_INT_TIMER:
290 set_gc0_cause(C_TI);
291 break;
292
293 case MIPS_EXC_INT_IO:
294 case MIPS_EXC_INT_IPI_1:
295 case MIPS_EXC_INT_IPI_2:
296 if (cpu_has_guestctl2)
297 set_c0_guestctl2(irq);
298 else
299 set_gc0_cause(irq);
300 break;
301
302 default:
303 break;
304 }
305
306 clear_bit(priority, &vcpu->arch.pending_exceptions);
307 return 1;
308}
309
310static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
311 u32 cause)
312{
313 u32 irq = (priority < MIPS_EXC_MAX) ?
314 kvm_vz_priority_to_irq[priority] : 0;
315
316 switch (priority) {
317 case MIPS_EXC_INT_TIMER:
318 /*
319 * Call to kvm_write_c0_guest_compare() clears Cause.TI in
320 * kvm_mips_emulate_CP0(). Explicitly clear irq associated with
321 * Cause.IP[IPTI] if GuestCtl2 virtual interrupt register not
322 * supported or if not using GuestCtl2 Hardware Clear.
323 */
324 if (cpu_has_guestctl2) {
325 if (!(read_c0_guestctl2() & (irq << 14)))
326 clear_c0_guestctl2(irq);
327 } else {
328 clear_gc0_cause(irq);
329 }
330 break;
331
332 case MIPS_EXC_INT_IO:
333 case MIPS_EXC_INT_IPI_1:
334 case MIPS_EXC_INT_IPI_2:
335 /* Clear GuestCtl2.VIP irq if not using Hardware Clear */
336 if (cpu_has_guestctl2) {
337 if (!(read_c0_guestctl2() & (irq << 14)))
338 clear_c0_guestctl2(irq);
339 } else {
340 clear_gc0_cause(irq);
341 }
342 break;
343
344 default:
345 break;
346 }
347
348 clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
349 return 1;
350}
351
352/*
353 * VZ guest timer handling.
354 */
355
356/**
357 * _kvm_vz_restore_stimer() - Restore soft timer state.
358 * @vcpu: Virtual CPU.
359 * @compare: CP0_Compare register value, restored by caller.
360 * @cause: CP0_Cause register to restore.
361 *
362 * Restore VZ state relating to the soft timer.
363 */
364static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
365 u32 cause)
366{
367 /*
368 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
369 * after Guest CP0_Compare.
370 */
371 write_c0_gtoffset(compare - read_c0_count());
372
373 back_to_back_c0_hazard();
374 write_gc0_cause(cause);
375}
376
377/**
378 * kvm_vz_restore_timer() - Restore guest timer state.
379 * @vcpu: Virtual CPU.
380 *
381 * Restore soft timer state from saved context.
382 */
383static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
384{
385 struct mips_coproc *cop0 = vcpu->arch.cop0;
386 u32 cause, compare;
387
388 compare = kvm_read_sw_gc0_compare(cop0);
389 cause = kvm_read_sw_gc0_cause(cop0);
390
391 write_gc0_compare(compare);
392 _kvm_vz_restore_stimer(vcpu, compare, cause);
393}
394
395/**
396 * kvm_vz_save_timer() - Save guest timer state.
397 * @vcpu: Virtual CPU.
398 *
399 * Save VZ guest timer state.
400 */
401static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
402{
403 struct mips_coproc *cop0 = vcpu->arch.cop0;
404 u32 compare, cause;
405
406 compare = read_gc0_compare();
407 cause = read_gc0_cause();
408
409 /* save timer-related state to VCPU context */
410 kvm_write_sw_gc0_cause(cop0, cause);
411 kvm_write_sw_gc0_compare(cop0, compare);
412}
413
414/**
415 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
416 * @inst: 32-bit instruction encoding.
417 *
418 * Finds whether @inst encodes an EVA memory access instruction, which would
419 * indicate that emulation of it should access the user mode address space
420 * instead of the kernel mode address space. This matters for MUSUK segments
421 * which are TLB mapped for user mode but unmapped for kernel mode.
422 *
423 * Returns: Whether @inst encodes an EVA accessor instruction.
424 */
425static bool is_eva_access(union mips_instruction inst)
426{
427 if (inst.spec3_format.opcode != spec3_op)
428 return false;
429
430 switch (inst.spec3_format.func) {
431 case lwle_op:
432 case lwre_op:
433 case cachee_op:
434 case sbe_op:
435 case she_op:
436 case sce_op:
437 case swe_op:
438 case swle_op:
439 case swre_op:
440 case prefe_op:
441 case lbue_op:
442 case lhue_op:
443 case lbe_op:
444 case lhe_op:
445 case lle_op:
446 case lwe_op:
447 return true;
448 default:
449 return false;
450 }
451}
452
453/**
454 * is_eva_am_mapped() - Find whether an access mode is mapped.
455 * @vcpu: KVM VCPU state.
456 * @am: 3-bit encoded access mode.
457 * @eu: Segment becomes unmapped and uncached when Status.ERL=1.
458 *
459 * Decode @am to find whether it encodes a mapped segment for the current VCPU
460 * state. Where necessary @eu and the actual instruction causing the fault are
461 * taken into account to make the decision.
462 *
463 * Returns: Whether the VCPU faulted on a TLB mapped address.
464 */
465static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
466{
467 u32 am_lookup;
468 int err;
469
470 /*
471 * Interpret access control mode. We assume address errors will already
472 * have been caught by the guest, leaving us with:
473 * AM UM SM KM 31..24 23..16
474 * UK 0 000 Unm 0 0
475 * MK 1 001 TLB 1
476 * MSK 2 010 TLB TLB 1
477 * MUSK 3 011 TLB TLB TLB 1
478 * MUSUK 4 100 TLB TLB Unm 0 1
479 * USK 5 101 Unm Unm 0 0
480 * - 6 110 0 0
481 * UUSK 7 111 Unm Unm Unm 0 0
482 *
483 * We shift a magic value by AM across the sign bit to find if always
484 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
485 */
486 am_lookup = 0x70080000 << am;
487 if ((s32)am_lookup < 0) {
488 /*
489 * MK, MSK, MUSK
490 * Always TLB mapped, unless SegCtl.EU && ERL
491 */
492 if (!eu || !(read_gc0_status() & ST0_ERL))
493 return true;
494 } else {
495 am_lookup <<= 8;
496 if ((s32)am_lookup < 0) {
497 union mips_instruction inst;
498 unsigned int status;
499 u32 *opc;
500
501 /*
502 * MUSUK
503 * TLB mapped if not in kernel mode
504 */
505 status = read_gc0_status();
506 if (!(status & (ST0_EXL | ST0_ERL)) &&
507 (status & ST0_KSU))
508 return true;
509 /*
510 * EVA access instructions in kernel
511 * mode access user address space.
512 */
513 opc = (u32 *)vcpu->arch.pc;
514 if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
515 opc += 1;
516 err = kvm_get_badinstr(opc, vcpu, &inst.word);
517 if (!err && is_eva_access(inst))
518 return true;
519 }
520 }
521
522 return false;
523}
524
525/**
526 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
527 * @vcpu: KVM VCPU state.
528 * @gva: Guest virtual address to convert.
529 * @gpa: Output guest physical address.
530 *
531 * Convert a guest virtual address (GVA) which is valid according to the guest
532 * context, to a guest physical address (GPA).
533 *
534 * Returns: 0 on success.
535 * -errno on failure.
536 */
537static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
538 unsigned long *gpa)
539{
540 u32 gva32 = gva;
541 unsigned long segctl;
542
543 if ((long)gva == (s32)gva32) {
544 /* Handle canonical 32-bit virtual address */
545 if (cpu_guest_has_segments) {
546 unsigned long mask, pa;
547
548 switch (gva32 >> 29) {
549 case 0:
550 case 1: /* CFG5 (1GB) */
551 segctl = read_gc0_segctl2() >> 16;
552 mask = (unsigned long)0xfc0000000ull;
553 break;
554 case 2:
555 case 3: /* CFG4 (1GB) */
556 segctl = read_gc0_segctl2();
557 mask = (unsigned long)0xfc0000000ull;
558 break;
559 case 4: /* CFG3 (512MB) */
560 segctl = read_gc0_segctl1() >> 16;
561 mask = (unsigned long)0xfe0000000ull;
562 break;
563 case 5: /* CFG2 (512MB) */
564 segctl = read_gc0_segctl1();
565 mask = (unsigned long)0xfe0000000ull;
566 break;
567 case 6: /* CFG1 (512MB) */
568 segctl = read_gc0_segctl0() >> 16;
569 mask = (unsigned long)0xfe0000000ull;
570 break;
571 case 7: /* CFG0 (512MB) */
572 segctl = read_gc0_segctl0();
573 mask = (unsigned long)0xfe0000000ull;
574 break;
575 default:
576 /*
577 * GCC 4.9 isn't smart enough to figure out that
578 * segctl and mask are always initialised.
579 */
580 unreachable();
581 }
582
583 if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
584 segctl & 0x0008))
585 goto tlb_mapped;
586
587 /* Unmapped, find guest physical address */
588 pa = (segctl << 20) & mask;
589 pa |= gva32 & ~mask;
590 *gpa = pa;
591 return 0;
592 } else if ((s32)gva32 < (s32)0xc0000000) {
593 /* legacy unmapped KSeg0 or KSeg1 */
594 *gpa = gva32 & 0x1fffffff;
595 return 0;
596 }
597#ifdef CONFIG_64BIT
598 } else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
599 /* XKPHYS */
600 if (cpu_guest_has_segments) {
601 /*
602 * Each of the 8 regions can be overridden by SegCtl2.XR
603 * to use SegCtl1.XAM.
604 */
605 segctl = read_gc0_segctl2();
606 if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
607 segctl = read_gc0_segctl1();
608 if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
609 0))
610 goto tlb_mapped;
611 }
612
613 }
614 /*
615 * Traditionally fully unmapped.
616 * Bits 61:59 specify the CCA, which we can just mask off here.
617 * Bits 58:PABITS should be zero, but we shouldn't have got here
618 * if it wasn't.
619 */
620 *gpa = gva & 0x07ffffffffffffff;
621 return 0;
622#endif
623 }
624
625tlb_mapped:
626 return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
627}
628
629/**
630 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
631 * @vcpu: KVM VCPU state.
632 * @badvaddr: Root BadVAddr.
633 * @gpa: Output guest physical address.
634 *
635 * VZ implementations are permitted to report guest virtual addresses (GVA) in
636 * BadVAddr on a root exception during guest execution, instead of the more
637 * convenient guest physical addresses (GPA). When we get a GVA, this function
638 * converts it to a GPA, taking into account guest segmentation and guest TLB
639 * state.
640 *
641 * Returns: 0 on success.
642 * -errno on failure.
643 */
644static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
645 unsigned long *gpa)
646{
647 unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
648 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
649
650 /* If BadVAddr is GPA, then all is well in the world */
651 if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
652 *gpa = badvaddr;
653 return 0;
654 }
655
656 /* Otherwise we'd expect it to be GVA ... */
657 if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
658 "Unexpected gexccode %#x\n", gexccode))
659 return -EINVAL;
660
661 /* ... and we need to perform the GVA->GPA translation in software */
662 return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
663}
664
665static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
666{
667 u32 *opc = (u32 *) vcpu->arch.pc;
668 u32 cause = vcpu->arch.host_cp0_cause;
669 u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
670 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
671 u32 inst = 0;
672
673 /*
674 * Fetch the instruction.
675 */
676 if (cause & CAUSEF_BD)
677 opc += 1;
678 kvm_get_badinstr(opc, vcpu, &inst);
679
680 kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
681 exccode, opc, inst, badvaddr,
682 read_gc0_status());
683 kvm_arch_vcpu_dump_regs(vcpu);
684 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
685 return RESUME_HOST;
686}
687
688static unsigned long mips_process_maar(unsigned int op, unsigned long val)
689{
690 /* Mask off unused bits */
691 unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;
692
693 if (read_gc0_pagegrain() & PG_ELPA)
694 mask |= 0x00ffffff00000000ull;
695 if (cpu_guest_has_mvh)
696 mask |= MIPS_MAAR_VH;
697
698 /* Set or clear VH */
699 if (op == mtc_op) {
700 /* clear VH */
701 val &= ~MIPS_MAAR_VH;
702 } else if (op == dmtc_op) {
703 /* set VH to match VL */
704 val &= ~MIPS_MAAR_VH;
705 if (val & MIPS_MAAR_VL)
706 val |= MIPS_MAAR_VH;
707 }
708
709 return val & mask;
710}
711
712static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
713{
714 struct mips_coproc *cop0 = vcpu->arch.cop0;
715
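	/* An all-ones index selects the last MAAR; other out-of-range indices leave MAARI unchanged */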
716 val &= MIPS_MAARI_INDEX;
717 if (val == MIPS_MAARI_INDEX)
718 kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
719 else if (val < ARRAY_SIZE(vcpu->arch.maar))
720 kvm_write_sw_gc0_maari(cop0, val);
721}
722
723static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
724 u32 *opc, u32 cause,
725 struct kvm_run *run,
726 struct kvm_vcpu *vcpu)
727{
728 struct mips_coproc *cop0 = vcpu->arch.cop0;
729 enum emulation_result er = EMULATE_DONE;
730 u32 rt, rd, sel;
731 unsigned long curr_pc;
732 unsigned long val;
733
734 /*
735 * Update PC and hold onto current PC in case there is
736 * an error and we want to rollback the PC
737 */
738 curr_pc = vcpu->arch.pc;
739 er = update_pc(vcpu, cause);
740 if (er == EMULATE_FAIL)
741 return er;
742
743 if (inst.co_format.co) {
744 switch (inst.co_format.func) {
745 case wait_op:
746 er = kvm_mips_emul_wait(vcpu);
747 break;
748 default:
749 er = EMULATE_FAIL;
750 }
751 } else {
752 rt = inst.c0r_format.rt;
753 rd = inst.c0r_format.rd;
754 sel = inst.c0r_format.sel;
755
756 switch (inst.c0r_format.rs) {
757 case dmfc_op:
758 case mfc_op:
759#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
760 cop0->stat[rd][sel]++;
761#endif
762 if (rd == MIPS_CP0_COUNT &&
763 sel == 0) { /* Count */
764 val = kvm_mips_read_count(vcpu);
765 } else if (rd == MIPS_CP0_COMPARE &&
766 sel == 0) { /* Compare */
767 val = read_gc0_compare();
768 } else if (rd == MIPS_CP0_LLADDR &&
769 sel == 0) { /* LLAddr */
770 if (cpu_guest_has_rw_llb)
771 val = read_gc0_lladdr() &
772 MIPS_LLADDR_LLB;
773 else
774 val = 0;
775 } else if (rd == MIPS_CP0_LLADDR &&
776 sel == 1 && /* MAAR */
777 cpu_guest_has_maar &&
778 !cpu_guest_has_dyn_maar) {
779 /* MAARI must be in range */
780 BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
781 ARRAY_SIZE(vcpu->arch.maar));
782 val = vcpu->arch.maar[
783 kvm_read_sw_gc0_maari(cop0)];
784 } else if ((rd == MIPS_CP0_PRID &&
785 (sel == 0 || /* PRid */
786 sel == 2 || /* CDMMBase */
787 sel == 3)) || /* CMGCRBase */
788 (rd == MIPS_CP0_STATUS &&
789 (sel == 2 || /* SRSCtl */
790 sel == 3)) || /* SRSMap */
791 (rd == MIPS_CP0_CONFIG &&
792 (sel == 7)) || /* Config7 */
793 (rd == MIPS_CP0_LLADDR &&
794 (sel == 2) && /* MAARI */
795 cpu_guest_has_maar &&
796 !cpu_guest_has_dyn_maar) ||
797 (rd == MIPS_CP0_ERRCTL &&
798 (sel == 0))) { /* ErrCtl */
799 val = cop0->reg[rd][sel];
800 } else {
801 val = 0;
802 er = EMULATE_FAIL;
803 }
804
805 if (er != EMULATE_FAIL) {
806 /* Sign extend */
807 if (inst.c0r_format.rs == mfc_op)
808 val = (int)val;
809 vcpu->arch.gprs[rt] = val;
810 }
811
812 trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
813 KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
814 KVM_TRACE_COP0(rd, sel), val);
815 break;
816
817 case dmtc_op:
818 case mtc_op:
819#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
820 cop0->stat[rd][sel]++;
821#endif
822 val = vcpu->arch.gprs[rt];
823 trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
824 KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
825 KVM_TRACE_COP0(rd, sel), val);
826
827 if (rd == MIPS_CP0_COUNT &&
828 sel == 0) { /* Count */
829 kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
830 } else if (rd == MIPS_CP0_COMPARE &&
831 sel == 0) { /* Compare */
832 kvm_mips_write_compare(vcpu,
833 vcpu->arch.gprs[rt],
834 true);
835 } else if (rd == MIPS_CP0_LLADDR &&
836 sel == 0) { /* LLAddr */
837 /*
838 * P5600 generates GPSI on guest MTC0 LLAddr.
839 * Only allow the guest to clear LLB.
840 */
841 if (cpu_guest_has_rw_llb &&
842 !(val & MIPS_LLADDR_LLB))
843 write_gc0_lladdr(0);
844 } else if (rd == MIPS_CP0_LLADDR &&
845 sel == 1 && /* MAAR */
846 cpu_guest_has_maar &&
847 !cpu_guest_has_dyn_maar) {
848 val = mips_process_maar(inst.c0r_format.rs,
849 val);
850
851 /* MAARI must be in range */
852 BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
853 ARRAY_SIZE(vcpu->arch.maar));
854 vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
855 val;
856 } else if (rd == MIPS_CP0_LLADDR &&
857 (sel == 2) && /* MAARI */
858 cpu_guest_has_maar &&
859 !cpu_guest_has_dyn_maar) {
860 kvm_write_maari(vcpu, val);
861 } else if (rd == MIPS_CP0_ERRCTL &&
862 (sel == 0)) { /* ErrCtl */
863 /* ignore the written value */
864 } else {
865 er = EMULATE_FAIL;
866 }
867 break;
868
869 default:
870 er = EMULATE_FAIL;
871 break;
872 }
873 }
874 /* Rollback PC only if emulation was unsuccessful */
875 if (er == EMULATE_FAIL) {
876 kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
877 curr_pc, __func__, inst.word);
878
879 vcpu->arch.pc = curr_pc;
880 }
881
882 return er;
883}
884
885static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
886 u32 *opc, u32 cause,
887 struct kvm_run *run,
888 struct kvm_vcpu *vcpu)
889{
890 enum emulation_result er = EMULATE_DONE;
891 u32 cache, op_inst, op, base;
892 s16 offset;
893 struct kvm_vcpu_arch *arch = &vcpu->arch;
894 unsigned long va, curr_pc;
895
896 /*
897 * Update PC and hold onto current PC in case there is
898 * an error and we want to rollback the PC
899 */
900 curr_pc = vcpu->arch.pc;
901 er = update_pc(vcpu, cause);
902 if (er == EMULATE_FAIL)
903 return er;
904
905 base = inst.i_format.rs;
906 op_inst = inst.i_format.rt;
907 if (cpu_has_mips_r6)
908 offset = inst.spec3_format.simmediate;
909 else
910 offset = inst.i_format.simmediate;
911 cache = op_inst & CacheOp_Cache;
912 op = op_inst & CacheOp_Op;
913
914 va = arch->gprs[base] + offset;
915
916 kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
917 cache, op, base, arch->gprs[base], offset);
918
919 /* Secondary or tertiary cache ops ignored */
920 if (cache != Cache_I && cache != Cache_D)
921 return EMULATE_DONE;
922
923 switch (op_inst) {
924 case Index_Invalidate_I:
925 flush_icache_line_indexed(va);
926 return EMULATE_DONE;
927 case Index_Writeback_Inv_D:
928 flush_dcache_line_indexed(va);
929 return EMULATE_DONE;
930 default:
931 break;
932 };
933
934 kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x\n",
935 curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
936 offset);
937 /* Rollback PC */
938 vcpu->arch.pc = curr_pc;
939
940 return EMULATE_FAIL;
941}
942
943static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
944 struct kvm_vcpu *vcpu)
945{
946 enum emulation_result er = EMULATE_DONE;
947 struct kvm_vcpu_arch *arch = &vcpu->arch;
948 struct kvm_run *run = vcpu->run;
949 union mips_instruction inst;
950 int rd, rt, sel;
951 int err;
952
953 /*
954 * Fetch the instruction.
955 */
956 if (cause & CAUSEF_BD)
957 opc += 1;
958 err = kvm_get_badinstr(opc, vcpu, &inst.word);
959 if (err)
960 return EMULATE_FAIL;
961
962 switch (inst.r_format.opcode) {
963 case cop0_op:
964 er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
965 break;
966#ifndef CONFIG_CPU_MIPSR6
967 case cache_op:
968 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
969 er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
970 break;
971#endif
972 case spec3_op:
973 switch (inst.spec3_format.func) {
974#ifdef CONFIG_CPU_MIPSR6
975 case cache6_op:
976 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
977 er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
978 break;
979#endif
980 case rdhwr_op:
981 if (inst.r_format.rs || (inst.r_format.re >> 3))
982 goto unknown;
983
984 rd = inst.r_format.rd;
985 rt = inst.r_format.rt;
986 sel = inst.r_format.re & 0x7;
987
988 switch (rd) {
989 case MIPS_HWR_CC: /* Read count register */
990 arch->gprs[rt] =
991 (long)(int)kvm_mips_read_count(vcpu);
992 break;
993 default:
994 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
995 KVM_TRACE_HWR(rd, sel), 0);
996 goto unknown;
997 };
998
999 trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
1000 KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);
1001
1002 er = update_pc(vcpu, cause);
1003 break;
1004 default:
1005 goto unknown;
1006 };
1007 break;
1008unknown:
1009
1010 default:
1011 kvm_err("GPSI exception not supported (%p/%#x)\n",
1012 opc, inst.word);
1013 kvm_arch_vcpu_dump_regs(vcpu);
1014 er = EMULATE_FAIL;
1015 break;
1016 }
1017
1018 return er;
1019}
1020
1021static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
1022 struct kvm_vcpu *vcpu)
1023{
1024 enum emulation_result er = EMULATE_DONE;
1025 struct kvm_vcpu_arch *arch = &vcpu->arch;
1026 union mips_instruction inst;
1027 int err;
1028
1029 /*
1030 * Fetch the instruction.
1031 */
1032 if (cause & CAUSEF_BD)
1033 opc += 1;
1034 err = kvm_get_badinstr(opc, vcpu, &inst.word);
1035 if (err)
1036 return EMULATE_FAIL;
1037
1038 /* complete MTC0 on behalf of guest and advance EPC */
1039 if (inst.c0r_format.opcode == cop0_op &&
1040 inst.c0r_format.rs == mtc_op &&
1041 inst.c0r_format.z == 0) {
1042 int rt = inst.c0r_format.rt;
1043 int rd = inst.c0r_format.rd;
1044 int sel = inst.c0r_format.sel;
1045 unsigned int val = arch->gprs[rt];
1046 unsigned int old_val, change;
1047
1048 trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
1049 val);
1050
1051 if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
1052 /* FR bit should read as zero if no FPU */
1053 if (!kvm_mips_guest_has_fpu(&vcpu->arch))
1054 val &= ~(ST0_CU1 | ST0_FR);
1055
1056 /*
1057 * Also don't allow FR to be set if host doesn't support
1058 * it.
1059 */
1060 if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
1061 val &= ~ST0_FR;
1062
1063 old_val = read_gc0_status();
1064 change = val ^ old_val;
1065
1066 if (change & ST0_FR) {
1067 /*
1068 * FPU and Vector register state is made
1069 * UNPREDICTABLE by a change of FR, so don't
1070 * even bother saving it.
1071 */
1072 kvm_drop_fpu(vcpu);
1073 }
1074
1075 /*
1076 * If MSA state is already live, it is undefined how it
1077 * interacts with FR=0 FPU state, and we don't want to
1078 * hit reserved instruction exceptions trying to save
1079 * the MSA state later when CU=1 && FR=1, so play it
1080 * safe and save it first.
1081 */
1082 if (change & ST0_CU1 && !(val & ST0_FR) &&
1083 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
1084 kvm_lose_fpu(vcpu);
1085
1086 write_gc0_status(val);
1087 } else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
1088 u32 old_cause = read_gc0_cause();
1089 u32 change = old_cause ^ val;
1090
1091 /* DC bit enabling/disabling timer? */
1092 if (change & CAUSEF_DC) {
1093 if (val & CAUSEF_DC)
1094 kvm_mips_count_disable_cause(vcpu);
1095 else
1096 kvm_mips_count_enable_cause(vcpu);
1097 }
1098
1099 /* Only certain bits are RW to the guest */
1100 change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
1101 CAUSEF_IP0 | CAUSEF_IP1);
1102
1103 /* WP can only be cleared */
1104 change &= ~CAUSEF_WP | old_cause;
1105
1106 write_gc0_cause(old_cause ^ change);
1107 } else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
1108 write_gc0_intctl(val);
1109 } else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
1110 old_val = read_gc0_config5();
1111 change = val ^ old_val;
1112 /* Handle changes in FPU/MSA modes */
1113 preempt_disable();
1114
1115 /*
1116 * Propagate FRE changes immediately if the FPU
1117 * context is already loaded.
1118 */
1119 if (change & MIPS_CONF5_FRE &&
1120 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
1121 change_c0_config5(MIPS_CONF5_FRE, val);
1122
1123 preempt_enable();
1124
1125 val = old_val ^
1126 (change & kvm_vz_config5_guest_wrmask(vcpu));
1127 write_gc0_config5(val);
1128 } else {
1129 kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
1130 opc, inst.word);
1131 er = EMULATE_FAIL;
1132 }
1133
1134 if (er != EMULATE_FAIL)
1135 er = update_pc(vcpu, cause);
1136 } else {
1137 kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
1138 opc, inst.word);
1139 er = EMULATE_FAIL;
1140 }
1141
1142 return er;
1143}
1144
1145static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
1146 struct kvm_vcpu *vcpu)
1147{
1148 enum emulation_result er;
1149 union mips_instruction inst;
1150 unsigned long curr_pc;
1151 int err;
1152
1153 if (cause & CAUSEF_BD)
1154 opc += 1;
1155 err = kvm_get_badinstr(opc, vcpu, &inst.word);
1156 if (err)
1157 return EMULATE_FAIL;
1158
1159 /*
1160 * Update PC and hold onto current PC in case there is
1161 * an error and we want to rollback the PC
1162 */
1163 curr_pc = vcpu->arch.pc;
1164 er = update_pc(vcpu, cause);
1165 if (er == EMULATE_FAIL)
1166 return er;
1167
1168 er = kvm_mips_emul_hypcall(vcpu, inst);
1169 if (er == EMULATE_FAIL)
1170 vcpu->arch.pc = curr_pc;
1171
1172 return er;
1173}
1174
1175static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
1176 u32 cause,
1177 u32 *opc,
1178 struct kvm_vcpu *vcpu)
1179{
1180 u32 inst;
1181
1182 /*
1183 * Fetch the instruction.
1184 */
1185 if (cause & CAUSEF_BD)
1186 opc += 1;
1187 kvm_get_badinstr(opc, vcpu, &inst);
1188
1189 kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
1190 gexccode, opc, inst, read_gc0_status());
1191
1192 return EMULATE_FAIL;
1193}
1194
1195static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
1196{
1197 u32 *opc = (u32 *) vcpu->arch.pc;
1198 u32 cause = vcpu->arch.host_cp0_cause;
1199 enum emulation_result er = EMULATE_DONE;
1200 u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
1201 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
1202 int ret = RESUME_GUEST;
1203
1204 trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
1205 switch (gexccode) {
1206 case MIPS_GCTL0_GEXC_GPSI:
1207 ++vcpu->stat.vz_gpsi_exits;
1208 er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
1209 break;
1210 case MIPS_GCTL0_GEXC_GSFC:
1211 ++vcpu->stat.vz_gsfc_exits;
1212 er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
1213 break;
1214 case MIPS_GCTL0_GEXC_HC:
1215 ++vcpu->stat.vz_hc_exits;
1216 er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
1217 break;
1218 case MIPS_GCTL0_GEXC_GRR:
1219 ++vcpu->stat.vz_grr_exits;
1220 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1221 vcpu);
1222 break;
1223 case MIPS_GCTL0_GEXC_GVA:
1224 ++vcpu->stat.vz_gva_exits;
1225 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1226 vcpu);
1227 break;
1228 case MIPS_GCTL0_GEXC_GHFC:
1229 ++vcpu->stat.vz_ghfc_exits;
1230 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1231 vcpu);
1232 break;
1233 case MIPS_GCTL0_GEXC_GPA:
1234 ++vcpu->stat.vz_gpa_exits;
1235 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1236 vcpu);
1237 break;
1238 default:
1239 ++vcpu->stat.vz_resvd_exits;
1240 er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
1241 vcpu);
1242 break;
1243
1244 }
1245
1246 if (er == EMULATE_DONE) {
1247 ret = RESUME_GUEST;
1248 } else if (er == EMULATE_HYPERCALL) {
1249 ret = kvm_mips_handle_hypcall(vcpu);
1250 } else {
1251 vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1252 ret = RESUME_HOST;
1253 }
1254 return ret;
1255}
1256
1257/**
1258 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
1259 * @vcpu: Virtual CPU context.
1260 *
1261 * Handle when the guest attempts to use a coprocessor which hasn't been allowed
1262 * by the root context.
1263 */
1264static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
1265{
1266 struct kvm_run *run = vcpu->run;
1267 u32 cause = vcpu->arch.host_cp0_cause;
1268 enum emulation_result er = EMULATE_FAIL;
1269 int ret = RESUME_GUEST;
1270
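	/* Cause.CE == 1 identifies coprocessor 1, i.e. the FPU */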
1271 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
1272 /*
1273 * If guest FPU not present, the FPU operation should have been
1274 * treated as a reserved instruction!
1275 * If FPU already in use, we shouldn't get this at all.
1276 */
1277 if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
1278 vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
1279 preempt_enable();
1280 return EMULATE_FAIL;
1281 }
1282
1283 kvm_own_fpu(vcpu);
1284 er = EMULATE_DONE;
1285 }
1286 /* other coprocessors not handled */
1287
1288 switch (er) {
1289 case EMULATE_DONE:
1290 ret = RESUME_GUEST;
1291 break;
1292
1293 case EMULATE_FAIL:
1294 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1295 ret = RESUME_HOST;
1296 break;
1297
1298 default:
1299 BUG();
1300 }
1301 return ret;
1302}
1303
1304/**
1305 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
1306 * @vcpu: Virtual CPU context.
1307 *
1308 * Handle when the guest attempts to use MSA when it is disabled in the root
1309 * context.
1310 */
1311static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
1312{
1313 struct kvm_run *run = vcpu->run;
1314
1315 /*
1316 * If MSA not present or not exposed to guest or FR=0, the MSA operation
1317 * should have been treated as a reserved instruction!
1318 * Same if CU1=1, FR=0.
1319 * If MSA already in use, we shouldn't get this at all.
1320 */
1321 if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
1322 (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
1323 !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
1324 vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
1325 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1326 return RESUME_HOST;
1327 }
1328
1329 kvm_own_msa(vcpu);
1330
1331 return RESUME_GUEST;
1332}
1333
1334static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
1335{
1336 struct kvm_run *run = vcpu->run;
1337 u32 *opc = (u32 *) vcpu->arch.pc;
1338 u32 cause = vcpu->arch.host_cp0_cause;
1339 ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1340 union mips_instruction inst;
1341 enum emulation_result er = EMULATE_DONE;
1342 int err, ret = RESUME_GUEST;
1343
1344 if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
1345 /* A code fetch fault doesn't count as an MMIO */
1346 if (kvm_is_ifetch_fault(&vcpu->arch)) {
1347 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1348 return RESUME_HOST;
1349 }
1350
1351 /* Fetch the instruction */
1352 if (cause & CAUSEF_BD)
1353 opc += 1;
1354 err = kvm_get_badinstr(opc, vcpu, &inst.word);
1355 if (err) {
1356 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1357 return RESUME_HOST;
1358 }
1359
1360 /* Treat as MMIO */
1361 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
1362 if (er == EMULATE_FAIL) {
1363 kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1364 opc, badvaddr);
1365 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1366 }
1367 }
1368
1369 if (er == EMULATE_DONE) {
1370 ret = RESUME_GUEST;
1371 } else if (er == EMULATE_DO_MMIO) {
1372 run->exit_reason = KVM_EXIT_MMIO;
1373 ret = RESUME_HOST;
1374 } else {
1375 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1376 ret = RESUME_HOST;
1377 }
1378 return ret;
1379}
1380
1381static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
1382{
1383 struct kvm_run *run = vcpu->run;
1384 u32 *opc = (u32 *) vcpu->arch.pc;
1385 u32 cause = vcpu->arch.host_cp0_cause;
1386 ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
1387 union mips_instruction inst;
1388 enum emulation_result er = EMULATE_DONE;
1389 int err;
1390 int ret = RESUME_GUEST;
1391
1392 /* Just try the access again if we couldn't do the translation */
1393 if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
1394 return RESUME_GUEST;
1395 vcpu->arch.host_cp0_badvaddr = badvaddr;
1396
1397 if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
1398 /* Fetch the instruction */
1399 if (cause & CAUSEF_BD)
1400 opc += 1;
1401 err = kvm_get_badinstr(opc, vcpu, &inst.word);
1402 if (err) {
1403 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1404 return RESUME_HOST;
1405 }
1406
1407 /* Treat as MMIO */
1408 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
1409 if (er == EMULATE_FAIL) {
1410 kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
1411 opc, badvaddr);
1412 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1413 }
1414 }
1415
1416 if (er == EMULATE_DONE) {
1417 ret = RESUME_GUEST;
1418 } else if (er == EMULATE_DO_MMIO) {
1419 run->exit_reason = KVM_EXIT_MMIO;
1420 ret = RESUME_HOST;
1421 } else {
1422 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
1423 ret = RESUME_HOST;
1424 }
1425 return ret;
1426}
1427
1428static u64 kvm_vz_get_one_regs[] = {
1429 KVM_REG_MIPS_CP0_INDEX,
1430 KVM_REG_MIPS_CP0_ENTRYLO0,
1431 KVM_REG_MIPS_CP0_ENTRYLO1,
1432 KVM_REG_MIPS_CP0_CONTEXT,
1433 KVM_REG_MIPS_CP0_PAGEMASK,
1434 KVM_REG_MIPS_CP0_PAGEGRAIN,
1435 KVM_REG_MIPS_CP0_WIRED,
1436 KVM_REG_MIPS_CP0_HWRENA,
1437 KVM_REG_MIPS_CP0_BADVADDR,
1438 KVM_REG_MIPS_CP0_COUNT,
1439 KVM_REG_MIPS_CP0_ENTRYHI,
1440 KVM_REG_MIPS_CP0_COMPARE,
1441 KVM_REG_MIPS_CP0_STATUS,
1442 KVM_REG_MIPS_CP0_INTCTL,
1443 KVM_REG_MIPS_CP0_CAUSE,
1444 KVM_REG_MIPS_CP0_EPC,
1445 KVM_REG_MIPS_CP0_PRID,
1446 KVM_REG_MIPS_CP0_EBASE,
1447 KVM_REG_MIPS_CP0_CONFIG,
1448 KVM_REG_MIPS_CP0_CONFIG1,
1449 KVM_REG_MIPS_CP0_CONFIG2,
1450 KVM_REG_MIPS_CP0_CONFIG3,
1451 KVM_REG_MIPS_CP0_CONFIG4,
1452 KVM_REG_MIPS_CP0_CONFIG5,
1453#ifdef CONFIG_64BIT
1454 KVM_REG_MIPS_CP0_XCONTEXT,
1455#endif
1456 KVM_REG_MIPS_CP0_ERROREPC,
1457
1458 KVM_REG_MIPS_COUNT_CTL,
1459 KVM_REG_MIPS_COUNT_RESUME,
1460 KVM_REG_MIPS_COUNT_HZ,
1461};
1462
1463static u64 kvm_vz_get_one_regs_contextconfig[] = {
1464 KVM_REG_MIPS_CP0_CONTEXTCONFIG,
1465#ifdef CONFIG_64BIT
1466 KVM_REG_MIPS_CP0_XCONTEXTCONFIG,
1467#endif
1468};
1469
1470static u64 kvm_vz_get_one_regs_segments[] = {
1471 KVM_REG_MIPS_CP0_SEGCTL0,
1472 KVM_REG_MIPS_CP0_SEGCTL1,
1473 KVM_REG_MIPS_CP0_SEGCTL2,
1474};
1475
1476static u64 kvm_vz_get_one_regs_htw[] = {
1477 KVM_REG_MIPS_CP0_PWBASE,
1478 KVM_REG_MIPS_CP0_PWFIELD,
1479 KVM_REG_MIPS_CP0_PWSIZE,
1480 KVM_REG_MIPS_CP0_PWCTL,
1481};
1482
1483static u64 kvm_vz_get_one_regs_kscratch[] = {
1484 KVM_REG_MIPS_CP0_KSCRATCH1,
1485 KVM_REG_MIPS_CP0_KSCRATCH2,
1486 KVM_REG_MIPS_CP0_KSCRATCH3,
1487 KVM_REG_MIPS_CP0_KSCRATCH4,
1488 KVM_REG_MIPS_CP0_KSCRATCH5,
1489 KVM_REG_MIPS_CP0_KSCRATCH6,
1490};
1491
1492static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu)
1493{
1494 unsigned long ret;
1495
1496 ret = ARRAY_SIZE(kvm_vz_get_one_regs);
1497 if (cpu_guest_has_userlocal)
1498 ++ret;
1499 if (cpu_guest_has_badinstr)
1500 ++ret;
1501 if (cpu_guest_has_badinstrp)
1502 ++ret;
1503 if (cpu_guest_has_contextconfig)
1504 ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1505 if (cpu_guest_has_segments)
1506 ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1507 if (cpu_guest_has_htw)
1508 ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1509 if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar)
1510 ret += 1 + ARRAY_SIZE(vcpu->arch.maar);
1511 ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask);
1512
1513 return ret;
1514}
1515
1516static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices)
1517{
1518 u64 index;
1519 unsigned int i;
1520
1521 if (copy_to_user(indices, kvm_vz_get_one_regs,
1522 sizeof(kvm_vz_get_one_regs)))
1523 return -EFAULT;
1524 indices += ARRAY_SIZE(kvm_vz_get_one_regs);
1525
1526 if (cpu_guest_has_userlocal) {
1527 index = KVM_REG_MIPS_CP0_USERLOCAL;
1528 if (copy_to_user(indices, &index, sizeof(index)))
1529 return -EFAULT;
1530 ++indices;
1531 }
1532 if (cpu_guest_has_badinstr) {
1533 index = KVM_REG_MIPS_CP0_BADINSTR;
1534 if (copy_to_user(indices, &index, sizeof(index)))
1535 return -EFAULT;
1536 ++indices;
1537 }
1538 if (cpu_guest_has_badinstrp) {
1539 index = KVM_REG_MIPS_CP0_BADINSTRP;
1540 if (copy_to_user(indices, &index, sizeof(index)))
1541 return -EFAULT;
1542 ++indices;
1543 }
1544 if (cpu_guest_has_contextconfig) {
1545 if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig,
1546 sizeof(kvm_vz_get_one_regs_contextconfig)))
1547 return -EFAULT;
1548 indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig);
1549 }
1550 if (cpu_guest_has_segments) {
1551 if (copy_to_user(indices, kvm_vz_get_one_regs_segments,
1552 sizeof(kvm_vz_get_one_regs_segments)))
1553 return -EFAULT;
1554 indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments);
1555 }
1556 if (cpu_guest_has_htw) {
1557 if (copy_to_user(indices, kvm_vz_get_one_regs_htw,
1558 sizeof(kvm_vz_get_one_regs_htw)))
1559 return -EFAULT;
1560 indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw);
1561 }
1562 if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) {
1563 for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) {
1564 index = KVM_REG_MIPS_CP0_MAAR(i);
1565 if (copy_to_user(indices, &index, sizeof(index)))
1566 return -EFAULT;
1567 ++indices;
1568 }
1569
1570 index = KVM_REG_MIPS_CP0_MAARI;
1571 if (copy_to_user(indices, &index, sizeof(index)))
1572 return -EFAULT;
1573 ++indices;
1574 }
1575 for (i = 0; i < 6; ++i) {
1576 if (!cpu_guest_has_kscr(i + 2))
1577 continue;
1578
1579 if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i],
1580 sizeof(kvm_vz_get_one_regs_kscratch[i])))
1581 return -EFAULT;
1582 ++indices;
1583 }
1584
1585 return 0;
1586}
1587
1588static inline s64 entrylo_kvm_to_user(unsigned long v)
1589{
1590 s64 mask, ret = v;
1591
1592 if (BITS_PER_LONG == 32) {
1593 /*
1594 * KVM API exposes 64-bit version of the register, so move the
1595 * RI/XI bits up into place.
1596 */
1597 mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1598 ret &= ~mask;
1599 ret |= ((s64)v & mask) << 32;
1600 }
1601 return ret;
1602}
1603
1604static inline unsigned long entrylo_user_to_kvm(s64 v)
1605{
1606 unsigned long mask, ret = v;
1607
1608 if (BITS_PER_LONG == 32) {
1609 /*
1610 * KVM API exposes 64-bit version of the register, so move the
1611 * RI/XI bits down into place.
1612 */
1613 mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI;
1614 ret &= ~mask;
1615 ret |= (v >> 32) & mask;
1616 }
1617 return ret;
1618}
1619
1620static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu,
1621 const struct kvm_one_reg *reg,
1622 s64 *v)
1623{
1624 struct mips_coproc *cop0 = vcpu->arch.cop0;
1625 unsigned int idx;
1626
1627 switch (reg->id) {
1628 case KVM_REG_MIPS_CP0_INDEX:
1629 *v = (long)read_gc0_index();
1630 break;
1631 case KVM_REG_MIPS_CP0_ENTRYLO0:
1632 *v = entrylo_kvm_to_user(read_gc0_entrylo0());
1633 break;
1634 case KVM_REG_MIPS_CP0_ENTRYLO1:
1635 *v = entrylo_kvm_to_user(read_gc0_entrylo1());
1636 break;
1637 case KVM_REG_MIPS_CP0_CONTEXT:
1638 *v = (long)read_gc0_context();
1639 break;
1640 case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
1641 if (!cpu_guest_has_contextconfig)
1642 return -EINVAL;
1643 *v = read_gc0_contextconfig();
1644 break;
1645 case KVM_REG_MIPS_CP0_USERLOCAL:
1646 if (!cpu_guest_has_userlocal)
1647 return -EINVAL;
1648 *v = read_gc0_userlocal();
1649 break;
1650#ifdef CONFIG_64BIT
1651 case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
1652 if (!cpu_guest_has_contextconfig)
1653 return -EINVAL;
1654 *v = read_gc0_xcontextconfig();
1655 break;
1656#endif
1657 case KVM_REG_MIPS_CP0_PAGEMASK:
1658 *v = (long)read_gc0_pagemask();
1659 break;
1660 case KVM_REG_MIPS_CP0_PAGEGRAIN:
1661 *v = (long)read_gc0_pagegrain();
1662 break;
1663 case KVM_REG_MIPS_CP0_SEGCTL0:
1664 if (!cpu_guest_has_segments)
1665 return -EINVAL;
1666 *v = read_gc0_segctl0();
1667 break;
1668 case KVM_REG_MIPS_CP0_SEGCTL1:
1669 if (!cpu_guest_has_segments)
1670 return -EINVAL;
1671 *v = read_gc0_segctl1();
1672 break;
1673 case KVM_REG_MIPS_CP0_SEGCTL2:
1674 if (!cpu_guest_has_segments)
1675 return -EINVAL;
1676 *v = read_gc0_segctl2();
1677 break;
1678 case KVM_REG_MIPS_CP0_PWBASE:
1679 if (!cpu_guest_has_htw)
1680 return -EINVAL;
1681 *v = read_gc0_pwbase();
1682 break;
1683 case KVM_REG_MIPS_CP0_PWFIELD:
1684 if (!cpu_guest_has_htw)
1685 return -EINVAL;
1686 *v = read_gc0_pwfield();
1687 break;
1688 case KVM_REG_MIPS_CP0_PWSIZE:
1689 if (!cpu_guest_has_htw)
1690 return -EINVAL;
1691 *v = read_gc0_pwsize();
1692 break;
1693 case KVM_REG_MIPS_CP0_WIRED:
1694 *v = (long)read_gc0_wired();
1695 break;
1696 case KVM_REG_MIPS_CP0_PWCTL:
1697 if (!cpu_guest_has_htw)
1698 return -EINVAL;
1699 *v = read_gc0_pwctl();
1700 break;
1701 case KVM_REG_MIPS_CP0_HWRENA:
1702 *v = (long)read_gc0_hwrena();
1703 break;
1704 case KVM_REG_MIPS_CP0_BADVADDR:
1705 *v = (long)read_gc0_badvaddr();
1706 break;
1707 case KVM_REG_MIPS_CP0_BADINSTR:
1708 if (!cpu_guest_has_badinstr)
1709 return -EINVAL;
1710 *v = read_gc0_badinstr();
1711 break;
1712 case KVM_REG_MIPS_CP0_BADINSTRP:
1713 if (!cpu_guest_has_badinstrp)
1714 return -EINVAL;
1715 *v = read_gc0_badinstrp();
1716 break;
1717 case KVM_REG_MIPS_CP0_COUNT:
1718 *v = kvm_mips_read_count(vcpu);
1719 break;
1720 case KVM_REG_MIPS_CP0_ENTRYHI:
1721 *v = (long)read_gc0_entryhi();
1722 break;
1723 case KVM_REG_MIPS_CP0_COMPARE:
1724 *v = (long)read_gc0_compare();
1725 break;
1726 case KVM_REG_MIPS_CP0_STATUS:
1727 *v = (long)read_gc0_status();
1728 break;
1729 case KVM_REG_MIPS_CP0_INTCTL:
1730 *v = read_gc0_intctl();
1731 break;
1732 case KVM_REG_MIPS_CP0_CAUSE:
1733 *v = (long)read_gc0_cause();
1734 break;
1735 case KVM_REG_MIPS_CP0_EPC:
1736 *v = (long)read_gc0_epc();
1737 break;
1738 case KVM_REG_MIPS_CP0_PRID:
1739 *v = (long)kvm_read_c0_guest_prid(cop0);
1740 break;
1741 case KVM_REG_MIPS_CP0_EBASE:
1742 *v = kvm_vz_read_gc0_ebase();
1743 break;
1744 case KVM_REG_MIPS_CP0_CONFIG:
1745 *v = read_gc0_config();
1746 break;
1747 case KVM_REG_MIPS_CP0_CONFIG1:
1748 if (!cpu_guest_has_conf1)
1749 return -EINVAL;
1750 *v = read_gc0_config1();
1751 break;
1752 case KVM_REG_MIPS_CP0_CONFIG2:
1753 if (!cpu_guest_has_conf2)
1754 return -EINVAL;
1755 *v = read_gc0_config2();
1756 break;
1757 case KVM_REG_MIPS_CP0_CONFIG3:
1758 if (!cpu_guest_has_conf3)
1759 return -EINVAL;
1760 *v = read_gc0_config3();
1761 break;
1762 case KVM_REG_MIPS_CP0_CONFIG4:
1763 if (!cpu_guest_has_conf4)
1764 return -EINVAL;
1765 *v = read_gc0_config4();
1766 break;
1767 case KVM_REG_MIPS_CP0_CONFIG5:
1768 if (!cpu_guest_has_conf5)
1769 return -EINVAL;
1770 *v = read_gc0_config5();
1771 break;
1772 case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
1773 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
1774 return -EINVAL;
1775 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
1776 if (idx >= ARRAY_SIZE(vcpu->arch.maar))
1777 return -EINVAL;
1778 *v = vcpu->arch.maar[idx];
1779 break;
1780 case KVM_REG_MIPS_CP0_MAARI:
1781 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
1782 return -EINVAL;
1783 *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0);
1784 break;
1785#ifdef CONFIG_64BIT
1786 case KVM_REG_MIPS_CP0_XCONTEXT:
1787 *v = read_gc0_xcontext();
1788 break;
1789#endif
1790 case KVM_REG_MIPS_CP0_ERROREPC:
1791 *v = (long)read_gc0_errorepc();
1792 break;
1793 case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
1794 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
1795 if (!cpu_guest_has_kscr(idx))
1796 return -EINVAL;
1797 switch (idx) {
1798 case 2:
1799 *v = (long)read_gc0_kscratch1();
1800 break;
1801 case 3:
1802 *v = (long)read_gc0_kscratch2();
1803 break;
1804 case 4:
1805 *v = (long)read_gc0_kscratch3();
1806 break;
1807 case 5:
1808 *v = (long)read_gc0_kscratch4();
1809 break;
1810 case 6:
1811 *v = (long)read_gc0_kscratch5();
1812 break;
1813 case 7:
1814 *v = (long)read_gc0_kscratch6();
1815 break;
1816 }
1817 break;
1818 case KVM_REG_MIPS_COUNT_CTL:
1819 *v = vcpu->arch.count_ctl;
1820 break;
1821 case KVM_REG_MIPS_COUNT_RESUME:
1822 *v = ktime_to_ns(vcpu->arch.count_resume);
1823 break;
1824 case KVM_REG_MIPS_COUNT_HZ:
1825 *v = vcpu->arch.count_hz;
1826 break;
1827 default:
1828 return -EINVAL;
1829 }
1830 return 0;
1831}
1832
1833static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu,
1834 const struct kvm_one_reg *reg,
1835 s64 v)
1836{
1837 struct mips_coproc *cop0 = vcpu->arch.cop0;
1838 unsigned int idx;
1839 int ret = 0;
1840 unsigned int cur, change;
1841
1842 switch (reg->id) {
1843 case KVM_REG_MIPS_CP0_INDEX:
1844 write_gc0_index(v);
1845 break;
1846 case KVM_REG_MIPS_CP0_ENTRYLO0:
1847 write_gc0_entrylo0(entrylo_user_to_kvm(v));
1848 break;
1849 case KVM_REG_MIPS_CP0_ENTRYLO1:
1850 write_gc0_entrylo1(entrylo_user_to_kvm(v));
1851 break;
1852 case KVM_REG_MIPS_CP0_CONTEXT:
1853 write_gc0_context(v);
1854 break;
1855 case KVM_REG_MIPS_CP0_CONTEXTCONFIG:
1856 if (!cpu_guest_has_contextconfig)
1857 return -EINVAL;
1858 write_gc0_contextconfig(v);
1859 break;
1860 case KVM_REG_MIPS_CP0_USERLOCAL:
1861 if (!cpu_guest_has_userlocal)
1862 return -EINVAL;
1863 write_gc0_userlocal(v);
1864 break;
1865#ifdef CONFIG_64BIT
1866 case KVM_REG_MIPS_CP0_XCONTEXTCONFIG:
1867 if (!cpu_guest_has_contextconfig)
1868 return -EINVAL;
1869 write_gc0_xcontextconfig(v);
1870 break;
1871#endif
1872 case KVM_REG_MIPS_CP0_PAGEMASK:
1873 write_gc0_pagemask(v);
1874 break;
1875 case KVM_REG_MIPS_CP0_PAGEGRAIN:
1876 write_gc0_pagegrain(v);
1877 break;
1878 case KVM_REG_MIPS_CP0_SEGCTL0:
1879 if (!cpu_guest_has_segments)
1880 return -EINVAL;
1881 write_gc0_segctl0(v);
1882 break;
1883 case KVM_REG_MIPS_CP0_SEGCTL1:
1884 if (!cpu_guest_has_segments)
1885 return -EINVAL;
1886 write_gc0_segctl1(v);
1887 break;
1888 case KVM_REG_MIPS_CP0_SEGCTL2:
1889 if (!cpu_guest_has_segments)
1890 return -EINVAL;
1891 write_gc0_segctl2(v);
1892 break;
1893 case KVM_REG_MIPS_CP0_PWBASE:
1894 if (!cpu_guest_has_htw)
1895 return -EINVAL;
1896 write_gc0_pwbase(v);
1897 break;
1898 case KVM_REG_MIPS_CP0_PWFIELD:
1899 if (!cpu_guest_has_htw)
1900 return -EINVAL;
1901 write_gc0_pwfield(v);
1902 break;
1903 case KVM_REG_MIPS_CP0_PWSIZE:
1904 if (!cpu_guest_has_htw)
1905 return -EINVAL;
1906 write_gc0_pwsize(v);
1907 break;
1908 case KVM_REG_MIPS_CP0_WIRED:
1909 change_gc0_wired(MIPSR6_WIRED_WIRED, v);
1910 break;
1911 case KVM_REG_MIPS_CP0_PWCTL:
1912 if (!cpu_guest_has_htw)
1913 return -EINVAL;
1914 write_gc0_pwctl(v);
1915 break;
1916 case KVM_REG_MIPS_CP0_HWRENA:
1917 write_gc0_hwrena(v);
1918 break;
1919 case KVM_REG_MIPS_CP0_BADVADDR:
1920 write_gc0_badvaddr(v);
1921 break;
1922 case KVM_REG_MIPS_CP0_BADINSTR:
1923 if (!cpu_guest_has_badinstr)
1924 return -EINVAL;
1925 write_gc0_badinstr(v);
1926 break;
1927 case KVM_REG_MIPS_CP0_BADINSTRP:
1928 if (!cpu_guest_has_badinstrp)
1929 return -EINVAL;
1930 write_gc0_badinstrp(v);
1931 break;
1932 case KVM_REG_MIPS_CP0_COUNT:
1933 kvm_mips_write_count(vcpu, v);
1934 break;
1935 case KVM_REG_MIPS_CP0_ENTRYHI:
1936 write_gc0_entryhi(v);
1937 break;
1938 case KVM_REG_MIPS_CP0_COMPARE:
1939 kvm_mips_write_compare(vcpu, v, false);
1940 break;
1941 case KVM_REG_MIPS_CP0_STATUS:
1942 write_gc0_status(v);
1943 break;
1944 case KVM_REG_MIPS_CP0_INTCTL:
1945 write_gc0_intctl(v);
1946 break;
1947 case KVM_REG_MIPS_CP0_CAUSE:
1948 /*
1949 * If the timer is stopped or started (DC bit) it must look
1950 * atomic with changes to the timer interrupt pending bit (TI).
1951 * A timer interrupt should not happen in between.
1952 */
1953 if ((read_gc0_cause() ^ v) & CAUSEF_DC) {
1954 if (v & CAUSEF_DC) {
1955 /* disable timer first */
1956 kvm_mips_count_disable_cause(vcpu);
1957 change_gc0_cause((u32)~CAUSEF_DC, v);
1958 } else {
1959 /* enable timer last */
1960 change_gc0_cause((u32)~CAUSEF_DC, v);
1961 kvm_mips_count_enable_cause(vcpu);
1962 }
1963 } else {
1964 write_gc0_cause(v);
1965 }
1966 break;
1967 case KVM_REG_MIPS_CP0_EPC:
1968 write_gc0_epc(v);
1969 break;
1970 case KVM_REG_MIPS_CP0_PRID:
1971 kvm_write_c0_guest_prid(cop0, v);
1972 break;
1973 case KVM_REG_MIPS_CP0_EBASE:
1974 kvm_vz_write_gc0_ebase(v);
1975 break;
1976 case KVM_REG_MIPS_CP0_CONFIG:
1977 cur = read_gc0_config();
1978 change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu);
1979 if (change) {
1980 v = cur ^ change;
1981 write_gc0_config(v);
1982 }
1983 break;
1984 case KVM_REG_MIPS_CP0_CONFIG1:
1985 if (!cpu_guest_has_conf1)
1986 break;
1987 cur = read_gc0_config1();
1988 change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu);
1989 if (change) {
1990 v = cur ^ change;
1991 write_gc0_config1(v);
1992 }
1993 break;
1994 case KVM_REG_MIPS_CP0_CONFIG2:
1995 if (!cpu_guest_has_conf2)
1996 break;
1997 cur = read_gc0_config2();
1998 change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu);
1999 if (change) {
2000 v = cur ^ change;
2001 write_gc0_config2(v);
2002 }
2003 break;
2004 case KVM_REG_MIPS_CP0_CONFIG3:
2005 if (!cpu_guest_has_conf3)
2006 break;
2007 cur = read_gc0_config3();
2008 change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu);
2009 if (change) {
2010 v = cur ^ change;
2011 write_gc0_config3(v);
2012 }
2013 break;
2014 case KVM_REG_MIPS_CP0_CONFIG4:
2015 if (!cpu_guest_has_conf4)
2016 break;
2017 cur = read_gc0_config4();
2018 change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu);
2019 if (change) {
2020 v = cur ^ change;
2021 write_gc0_config4(v);
2022 }
2023 break;
2024 case KVM_REG_MIPS_CP0_CONFIG5:
2025 if (!cpu_guest_has_conf5)
2026 break;
2027 cur = read_gc0_config5();
2028 change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu);
2029 if (change) {
2030 v = cur ^ change;
2031 write_gc0_config5(v);
2032 }
2033 break;
James Hogand42a0082017-03-14 10:15:38 +00002034 case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f):
2035 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2036 return -EINVAL;
2037 idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0);
2038 if (idx >= ARRAY_SIZE(vcpu->arch.maar))
2039 return -EINVAL;
2040 vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v);
2041 break;
2042 case KVM_REG_MIPS_CP0_MAARI:
2043 if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar)
2044 return -EINVAL;
2045 kvm_write_maari(vcpu, v);
2046 break;
James Hoganc992a4f2017-03-14 10:15:31 +00002047#ifdef CONFIG_64BIT
2048 case KVM_REG_MIPS_CP0_XCONTEXT:
2049 write_gc0_xcontext(v);
2050 break;
2051#endif
2052 case KVM_REG_MIPS_CP0_ERROREPC:
2053 write_gc0_errorepc(v);
2054 break;
2055 case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6:
2056 idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2;
2057 if (!cpu_guest_has_kscr(idx))
2058 return -EINVAL;
2059 switch (idx) {
2060 case 2:
2061 write_gc0_kscratch1(v);
2062 break;
2063 case 3:
2064 write_gc0_kscratch2(v);
2065 break;
2066 case 4:
2067 write_gc0_kscratch3(v);
2068 break;
2069 case 5:
2070 write_gc0_kscratch4(v);
2071 break;
2072 case 6:
2073 write_gc0_kscratch5(v);
2074 break;
2075 case 7:
2076 write_gc0_kscratch6(v);
2077 break;
2078 }
2079 break;
2080 case KVM_REG_MIPS_COUNT_CTL:
2081 ret = kvm_mips_set_count_ctl(vcpu, v);
2082 break;
2083 case KVM_REG_MIPS_COUNT_RESUME:
2084 ret = kvm_mips_set_count_resume(vcpu, v);
2085 break;
2086 case KVM_REG_MIPS_COUNT_HZ:
2087 ret = kvm_mips_set_count_hz(vcpu, v);
2088 break;
2089 default:
2090 return -EINVAL;
2091 }
2092 return ret;
2093}
2094
2095#define guestid_cache(cpu) (cpu_data[cpu].guestid_cache)
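/**
 * kvm_vz_get_new_guestid() - Allocate a new GuestID for a VCPU.
 * @cpu:	Physical CPU the VCPU is running on.
 * @vcpu:	Virtual CPU requiring a GuestID.
 *
 * Advance the per-CPU GuestID allocator. When the allocated bits wrap, start a
 * new GuestID version: flush a VTag icache if present, skip GuestID 0 (which
 * is reserved for root), and flush the guest TLB and any root TLB entries
 * belonging to guests so that stale GuestIDs cannot match.
 */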
2096static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu)
2097{
2098 unsigned long guestid = guestid_cache(cpu);
2099
2100 if (!(++guestid & GUESTID_MASK)) {
2101 if (cpu_has_vtag_icache)
2102 flush_icache_all();
2103
2104 if (!guestid) /* fix version if needed */
2105 guestid = GUESTID_FIRST_VERSION;
2106
2107 ++guestid; /* guestid 0 reserved for root */
2108
2109 /* start new guestid cycle */
2110 kvm_vz_local_flush_roottlb_all_guests();
2111 kvm_vz_local_flush_guesttlb_all();
2112 }
2113
2114 guestid_cache(cpu) = guestid;
2115}
2116
2117/* Returns 1 if the guest TLB may be clobbered */
2118static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu)
2119{
2120 int ret = 0;
2121 int i;
2122
2123 if (!vcpu->requests)
2124 return 0;
2125
2126 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
2127 if (cpu_has_guestid) {
2128 /* Drop all GuestIDs for this VCPU */
2129 for_each_possible_cpu(i)
2130 vcpu->arch.vzguestid[i] = 0;
2131 /* This will clobber guest TLB contents too */
2132 ret = 1;
2133 }
2134 /*
2135 * For Root ASID Dealias (RAD) we don't do anything here, but we
2136 * still need the request to ensure we recheck asid_flush_mask.
2137 * We can still return 0 as only the root TLB will be affected
2138 * by a root ASID flush.
2139 */
2140 }
2141
2142 return ret;
2143}
2144
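/**
 * kvm_vz_vcpu_save_wired() - Save the wired guest TLB entries.
 * @vcpu:	Virtual CPU.
 *
 * Read the wired entries out of the hardware guest TLB into
 * vcpu->arch.wired_tlb, growing the array if necessary, and invalidate any
 * entries that were wired on the previous save but are no longer wired.
 */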
2145static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu)
2146{
2147 unsigned int wired = read_gc0_wired();
2148 struct kvm_mips_tlb *tlbs;
2149 int i;
2150
2151 /* Expand the wired TLB array if necessary */
2152 wired &= MIPSR6_WIRED_WIRED;
2153 if (wired > vcpu->arch.wired_tlb_limit) {
2154 tlbs = krealloc(vcpu->arch.wired_tlb, wired *
2155 sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC);
2156 if (WARN_ON(!tlbs)) {
2157 /* Save whatever we can */
2158 wired = vcpu->arch.wired_tlb_limit;
2159 } else {
2160 vcpu->arch.wired_tlb = tlbs;
2161 vcpu->arch.wired_tlb_limit = wired;
2162 }
2163 }
2164
2165 if (wired)
2166 /* Save wired entries from the guest TLB */
2167 kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired);
2168 /* Invalidate any dropped entries since last time */
2169 for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) {
2170 vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i);
2171 vcpu->arch.wired_tlb[i].tlb_lo[0] = 0;
2172 vcpu->arch.wired_tlb[i].tlb_lo[1] = 0;
2173 vcpu->arch.wired_tlb[i].tlb_mask = 0;
2174 }
2175 vcpu->arch.wired_tlb_used = wired;
2176}
2177
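/**
 * kvm_vz_vcpu_load_wired() - Restore previously saved wired guest TLB entries.
 * @vcpu:	Virtual CPU.
 */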
2178static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu)
2179{
2180 /* Load wired entries into the guest TLB */
2181 if (vcpu->arch.wired_tlb)
2182 kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0,
2183 vcpu->arch.wired_tlb_used);
2184}
2185
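/**
 * kvm_vz_vcpu_load_tlb() - Prepare TLB state for entering guest context.
 * @vcpu:	Virtual CPU.
 * @cpu:	Physical CPU the guest will execute on.
 *
 * With GuestIDs, refresh this VCPU's GuestID if it is stale and restore it to
 * GuestCtl1.ID. Without GuestIDs, flush the shared guest TLB if a different
 * VCPU executed here last (or this VCPU last executed on another CPU), and
 * allocate a new root ASID for the GPA mappings when required.
 */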
2186static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu)
2187{
2188 struct kvm *kvm = vcpu->kvm;
2189 struct mm_struct *gpa_mm = &kvm->arch.gpa_mm;
2190 bool migrated;
2191
2192 /*
2193 * Are we entering guest context on a different CPU to last time?
2194 * If so, the VCPU's guest TLB state on this CPU may be stale.
2195 */
2196 migrated = (vcpu->arch.last_exec_cpu != cpu);
2197 vcpu->arch.last_exec_cpu = cpu;
2198
2199 /*
2200 * A vcpu's GuestID is set in GuestCtl1.ID when the vcpu is loaded and
2201 * remains set until another vcpu is loaded in. As a rule GuestRID
2202 * remains zeroed when in root context unless the kernel is busy
2203 * manipulating guest tlb entries.
2204 */
2205 if (cpu_has_guestid) {
2206 /*
2207 * Check if our GuestID is of an older version and thus invalid.
2208 *
2209 * We also discard the stored GuestID if we've executed on
2210 * another CPU, as the guest mappings may have changed without
2211 * hypervisor knowledge.
2212 */
2213 if (migrated ||
2214 (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) &
2215 GUESTID_VERSION_MASK) {
2216 kvm_vz_get_new_guestid(cpu, vcpu);
2217 vcpu->arch.vzguestid[cpu] = guestid_cache(cpu);
2218 trace_kvm_guestid_change(vcpu,
2219 vcpu->arch.vzguestid[cpu]);
2220 }
2221
2222 /* Restore GuestID */
2223 change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]);
2224 } else {
2225 /*
2226 * The Guest TLB only stores a single guest's TLB state, so
2227 * flush it if another VCPU has executed on this CPU.
2228 *
2229 * We also flush if we've executed on another CPU, as the guest
2230 * mappings may have changed without hypervisor knowledge.
2231 */
2232 if (migrated || last_exec_vcpu[cpu] != vcpu)
2233 kvm_vz_local_flush_guesttlb_all();
2234 last_exec_vcpu[cpu] = vcpu;
2235
2236 /*
2237 * Root ASID dealiases guest GPA mappings in the root TLB.
2238 * Allocate new root ASID if needed.
2239 */
2240 if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask)
2241 || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) &
2242 asid_version_mask(cpu))
2243 get_new_mmu_context(gpa_mm, cpu);
2244 }
2245}
2246
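/**
 * kvm_vz_vcpu_load() - Restore guest context when a VCPU is scheduled in.
 * @vcpu:	Virtual CPU.
 * @cpu:	Physical CPU the VCPU is being loaded onto.
 *
 * Restore CP0_Wired, the wired guest TLB entries and the guest timer, and if
 * the hardware guest register context may have been clobbered since this VCPU
 * last ran here, restore the full guest CP0 state from the software copies.
 */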
2247static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2248{
2249 struct mips_coproc *cop0 = vcpu->arch.cop0;
2250 bool migrated, all;
2251
2252 /*
2253 * Have we migrated to a different CPU?
2254 * If so, any old guest TLB state may be stale.
2255 */
2256 migrated = (vcpu->arch.last_sched_cpu != cpu);
2257
2258 /*
2259 * Was this the last VCPU to run on this CPU?
2260 * If not, any old guest state from this VCPU will have been clobbered.
2261 */
2262 all = migrated || (last_vcpu[cpu] != vcpu);
2263 last_vcpu[cpu] = vcpu;
2264
2265 /*
2266 * Restore CP0_Wired unconditionally as we clear it after use, and
2267 * restore wired guest TLB entries (while in guest context).
2268 */
2269 kvm_restore_gc0_wired(cop0);
2270 if (current->flags & PF_VCPU) {
2271 tlbw_use_hazard();
2272 kvm_vz_vcpu_load_tlb(vcpu, cpu);
2273 kvm_vz_vcpu_load_wired(vcpu);
2274 }
2275
2276 /*
2277 * Restore timer state regardless, as e.g. Cause.TI can change over time
2278 * if left unmaintained.
2279 */
2280 kvm_vz_restore_timer(vcpu);
2281
2282 /* Don't bother restoring registers multiple times unless necessary */
2283 if (!all)
2284 return 0;
2285
2286 /*
2287 * Restore config registers first, as some implementations restrict
2288 * writes to other registers when the corresponding feature bits aren't
2289 * set. For example Status.CU1 cannot be set unless Config1.FP is set.
2290 */
2291 kvm_restore_gc0_config(cop0);
2292 if (cpu_guest_has_conf1)
2293 kvm_restore_gc0_config1(cop0);
2294 if (cpu_guest_has_conf2)
2295 kvm_restore_gc0_config2(cop0);
2296 if (cpu_guest_has_conf3)
2297 kvm_restore_gc0_config3(cop0);
2298 if (cpu_guest_has_conf4)
2299 kvm_restore_gc0_config4(cop0);
2300 if (cpu_guest_has_conf5)
2301 kvm_restore_gc0_config5(cop0);
2302 if (cpu_guest_has_conf6)
2303 kvm_restore_gc0_config6(cop0);
2304 if (cpu_guest_has_conf7)
2305 kvm_restore_gc0_config7(cop0);
2306
2307 kvm_restore_gc0_index(cop0);
2308 kvm_restore_gc0_entrylo0(cop0);
2309 kvm_restore_gc0_entrylo1(cop0);
2310 kvm_restore_gc0_context(cop0);
James Hogandffe0422017-03-14 10:15:34 +00002311 if (cpu_guest_has_contextconfig)
2312 kvm_restore_gc0_contextconfig(cop0);
James Hoganc992a4f2017-03-14 10:15:31 +00002313#ifdef CONFIG_64BIT
2314 kvm_restore_gc0_xcontext(cop0);
James Hogandffe0422017-03-14 10:15:34 +00002315 if (cpu_guest_has_contextconfig)
2316 kvm_restore_gc0_xcontextconfig(cop0);
James Hoganc992a4f2017-03-14 10:15:31 +00002317#endif
2318 kvm_restore_gc0_pagemask(cop0);
2319 kvm_restore_gc0_pagegrain(cop0);
2320 kvm_restore_gc0_hwrena(cop0);
2321 kvm_restore_gc0_badvaddr(cop0);
2322 kvm_restore_gc0_entryhi(cop0);
2323 kvm_restore_gc0_status(cop0);
2324 kvm_restore_gc0_intctl(cop0);
2325 kvm_restore_gc0_epc(cop0);
2326 kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0));
2327 if (cpu_guest_has_userlocal)
2328 kvm_restore_gc0_userlocal(cop0);
2329
2330 kvm_restore_gc0_errorepc(cop0);
2331
2332 /* restore KScratch registers if enabled in guest */
2333 if (cpu_guest_has_conf4) {
2334 if (cpu_guest_has_kscr(2))
2335 kvm_restore_gc0_kscratch1(cop0);
2336 if (cpu_guest_has_kscr(3))
2337 kvm_restore_gc0_kscratch2(cop0);
2338 if (cpu_guest_has_kscr(4))
2339 kvm_restore_gc0_kscratch3(cop0);
2340 if (cpu_guest_has_kscr(5))
2341 kvm_restore_gc0_kscratch4(cop0);
2342 if (cpu_guest_has_kscr(6))
2343 kvm_restore_gc0_kscratch5(cop0);
2344 if (cpu_guest_has_kscr(7))
2345 kvm_restore_gc0_kscratch6(cop0);
2346 }
2347
James Hoganedc89262017-03-14 10:15:33 +00002348 if (cpu_guest_has_badinstr)
2349 kvm_restore_gc0_badinstr(cop0);
2350 if (cpu_guest_has_badinstrp)
2351 kvm_restore_gc0_badinstrp(cop0);
2352
James Hogan4b7de022017-03-14 10:15:35 +00002353 if (cpu_guest_has_segments) {
2354 kvm_restore_gc0_segctl0(cop0);
2355 kvm_restore_gc0_segctl1(cop0);
2356 kvm_restore_gc0_segctl2(cop0);
2357 }
2358
James Hogan5a2f3522017-03-14 10:15:36 +00002359 /* restore HTW registers */
2360 if (cpu_guest_has_htw) {
2361 kvm_restore_gc0_pwbase(cop0);
2362 kvm_restore_gc0_pwfield(cop0);
2363 kvm_restore_gc0_pwsize(cop0);
2364 kvm_restore_gc0_pwctl(cop0);
2365 }
2366
James Hoganc992a4f2017-03-14 10:15:31 +00002367 /* restore Root.GuestCtl2 from unused Guest guestctl2 register */
2368 if (cpu_has_guestctl2)
2369 write_c0_guestctl2(
2370 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]);
2371
James Hogan273819a62017-03-14 10:15:37 +00002372 /*
2373 * We should clear linked load bit to break interrupted atomics. This
2374 * prevents a SC on the next VCPU from succeeding by matching a LL on
2375 * the previous VCPU.
2376 */
2377 if (cpu_guest_has_rw_llb)
2378 write_gc0_lladdr(0);
2379
James Hoganc992a4f2017-03-14 10:15:31 +00002380 return 0;
2381}
2382
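/**
 * kvm_vz_vcpu_put() - Save guest context when a VCPU is scheduled out.
 * @vcpu:	Virtual CPU.
 * @cpu:	Physical CPU the VCPU is being unloaded from.
 *
 * Save the wired guest TLB entries, drop the FPU/MSA context, and save the
 * guest CP0 register and timer state into the software copies so the hardware
 * guest context can be reused.
 */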
2383static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
2384{
2385 struct mips_coproc *cop0 = vcpu->arch.cop0;
2386
2387 if (current->flags & PF_VCPU)
2388 kvm_vz_vcpu_save_wired(vcpu);
2389
2390 kvm_lose_fpu(vcpu);
2391
2392 kvm_save_gc0_index(cop0);
2393 kvm_save_gc0_entrylo0(cop0);
2394 kvm_save_gc0_entrylo1(cop0);
2395 kvm_save_gc0_context(cop0);
James Hogandffe0422017-03-14 10:15:34 +00002396 if (cpu_guest_has_contextconfig)
2397 kvm_save_gc0_contextconfig(cop0);
James Hoganc992a4f2017-03-14 10:15:31 +00002398#ifdef CONFIG_64BIT
2399 kvm_save_gc0_xcontext(cop0);
James Hogandffe0422017-03-14 10:15:34 +00002400 if (cpu_guest_has_contextconfig)
2401 kvm_save_gc0_xcontextconfig(cop0);
James Hoganc992a4f2017-03-14 10:15:31 +00002402#endif
2403 kvm_save_gc0_pagemask(cop0);
2404 kvm_save_gc0_pagegrain(cop0);
2405 kvm_save_gc0_wired(cop0);
2406 /* allow wired TLB entries to be overwritten */
2407 clear_gc0_wired(MIPSR6_WIRED_WIRED);
2408 kvm_save_gc0_hwrena(cop0);
2409 kvm_save_gc0_badvaddr(cop0);
2410 kvm_save_gc0_entryhi(cop0);
2411 kvm_save_gc0_status(cop0);
2412 kvm_save_gc0_intctl(cop0);
2413 kvm_save_gc0_epc(cop0);
2414 kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase());
2415 if (cpu_guest_has_userlocal)
2416 kvm_save_gc0_userlocal(cop0);
2417
2418 /* only save implemented config registers */
2419 kvm_save_gc0_config(cop0);
2420 if (cpu_guest_has_conf1)
2421 kvm_save_gc0_config1(cop0);
2422 if (cpu_guest_has_conf2)
2423 kvm_save_gc0_config2(cop0);
2424 if (cpu_guest_has_conf3)
2425 kvm_save_gc0_config3(cop0);
2426 if (cpu_guest_has_conf4)
2427 kvm_save_gc0_config4(cop0);
2428 if (cpu_guest_has_conf5)
2429 kvm_save_gc0_config5(cop0);
2430 if (cpu_guest_has_conf6)
2431 kvm_save_gc0_config6(cop0);
2432 if (cpu_guest_has_conf7)
2433 kvm_save_gc0_config7(cop0);
2434
2435 kvm_save_gc0_errorepc(cop0);
2436
2437 /* save KScratch registers if enabled in guest */
2438 if (cpu_guest_has_conf4) {
2439 if (cpu_guest_has_kscr(2))
2440 kvm_save_gc0_kscratch1(cop0);
2441 if (cpu_guest_has_kscr(3))
2442 kvm_save_gc0_kscratch2(cop0);
2443 if (cpu_guest_has_kscr(4))
2444 kvm_save_gc0_kscratch3(cop0);
2445 if (cpu_guest_has_kscr(5))
2446 kvm_save_gc0_kscratch4(cop0);
2447 if (cpu_guest_has_kscr(6))
2448 kvm_save_gc0_kscratch5(cop0);
2449 if (cpu_guest_has_kscr(7))
2450 kvm_save_gc0_kscratch6(cop0);
2451 }
2452
James Hoganedc89262017-03-14 10:15:33 +00002453 if (cpu_guest_has_badinstr)
2454 kvm_save_gc0_badinstr(cop0);
2455 if (cpu_guest_has_badinstrp)
2456 kvm_save_gc0_badinstrp(cop0);
2457
James Hogan4b7de022017-03-14 10:15:35 +00002458 if (cpu_guest_has_segments) {
2459 kvm_save_gc0_segctl0(cop0);
2460 kvm_save_gc0_segctl1(cop0);
2461 kvm_save_gc0_segctl2(cop0);
2462 }
2463
James Hogan5a2f3522017-03-14 10:15:36 +00002464 /* save HTW registers if enabled in guest */
2465 if (cpu_guest_has_htw &&
2466 kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) {
2467 kvm_save_gc0_pwbase(cop0);
2468 kvm_save_gc0_pwfield(cop0);
2469 kvm_save_gc0_pwsize(cop0);
2470 kvm_save_gc0_pwctl(cop0);
2471 }
2472
James Hoganc992a4f2017-03-14 10:15:31 +00002473 kvm_vz_save_timer(vcpu);
2474
2475 /* save Root.GuestCtl2 in unused Guest guestctl2 register */
2476 if (cpu_has_guestctl2)
2477 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] =
2478 read_c0_guestctl2();
2479
2480 return 0;
2481}
2482
2483/**
2484 * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB.
2485 * @size: Number of guest VTLB entries (0 < @size <= root VTLB entries).
2486 *
2487 * Attempt to resize the guest VTLB by writing guest Config registers. This is
2488 * necessary for cores with a shared root/guest TLB to avoid overlap with wired
2489 * entries in the root VTLB.
2490 *
2491 * Returns: The resulting guest VTLB size.
2492 */
2493static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size)
2494{
2495 unsigned int config4 = 0, ret = 0, limit;
2496
2497 /* Write MMUSize - 1 into guest Config registers */
2498 if (cpu_guest_has_conf1)
2499 change_gc0_config1(MIPS_CONF1_TLBS,
2500 (size - 1) << MIPS_CONF1_TLBS_SHIFT);
2501 if (cpu_guest_has_conf4) {
2502 config4 = read_gc0_config4();
2503 if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
2504 MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) {
2505 config4 &= ~MIPS_CONF4_VTLBSIZEEXT;
2506 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
2507 MIPS_CONF4_VTLBSIZEEXT_SHIFT;
2508 } else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
2509 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) {
2510 config4 &= ~MIPS_CONF4_MMUSIZEEXT;
2511 config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) <<
2512 MIPS_CONF4_MMUSIZEEXT_SHIFT;
2513 }
2514 write_gc0_config4(config4);
2515 }
2516
2517 /*
2518 * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it
2519 * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so write
2520 * not dropped)
2521 */
2522 if (cpu_has_mips_r6) {
2523 limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >>
2524 MIPSR6_WIRED_LIMIT_SHIFT;
2525 if (size - 1 <= limit)
2526 limit = 0;
2527 write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT);
2528 }
2529
2530 /* Read back MMUSize - 1 */
2531 back_to_back_c0_hazard();
2532 if (cpu_guest_has_conf1)
2533 ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >>
2534 MIPS_CONF1_TLBS_SHIFT;
2535 if (config4) {
2536 if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) ==
2537 MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT)
2538 ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >>
2539 MIPS_CONF4_VTLBSIZEEXT_SHIFT) <<
2540 MIPS_CONF1_TLBS_SIZE;
2541 else if ((config4 & MIPS_CONF4_MMUEXTDEF) ==
2542 MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT)
2543 ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >>
2544 MIPS_CONF4_MMUSIZEEXT_SHIFT) <<
2545 MIPS_CONF1_TLBS_SIZE;
2546 }
2547 return ret + 1;
2548}
2549
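/**
 * kvm_vz_hardware_enable() - Prepare this CPU to run VZ guests.
 *
 * Resize and flush the guest VTLB, enable guest control of CP0 context, the
 * guest MMU, CACHE operations and Config registers via GuestCtl0 (and
 * GuestCtl0Ext where present), and initialise the GuestID allocator if
 * GuestIDs are implemented.
 *
 * Returns:	0 on success, -EINVAL if the guest VTLB size is inconsistent
 *		between CPUs.
 */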
2550static int kvm_vz_hardware_enable(void)
2551{
2552 unsigned int mmu_size, guest_mmu_size, ftlb_size;
2553
2554 /*
2555 * ImgTec cores tend to use a shared root/guest TLB. To avoid overlap of
2556 * root wired and guest entries, the guest TLB may need resizing.
2557 */
2558 mmu_size = current_cpu_data.tlbsizevtlb;
2559 ftlb_size = current_cpu_data.tlbsize - mmu_size;
2560
2561 /* Try switching to maximum guest VTLB size for flush */
2562 guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size);
2563 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2564 kvm_vz_local_flush_guesttlb_all();
2565
2566 /*
2567 * Reduce to make space for root wired entries and at least 2 root
2568 * non-wired entries. This does assume that long-term wired entries
2569 * won't be added later.
2570 */
2571 guest_mmu_size = mmu_size - num_wired_entries() - 2;
2572 guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size);
2573 current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size;
2574
2575 /*
2576 * Write the VTLB size, but if another CPU has already written, check it
2577 * matches or we won't provide a consistent view to the guest. If this
2578 * ever happens it suggests an asymmetric number of wired entries.
2579 */
2580 if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) &&
2581 WARN(guest_mmu_size != kvm_vz_guest_vtlb_size,
2582 "Available guest VTLB size mismatch"))
2583 return -EINVAL;
2584
2585 /*
2586 * Enable virtualization features granting guest direct control of
2587 * certain features:
2588 * CP0=1: Guest coprocessor 0 context.
2589 * AT=Guest: Guest MMU.
2590 * CG=1: Hit (virtual address) CACHE operations (optional).
2591 * CF=1: Guest Config registers.
2592 * CGI=1: Indexed flush CACHE operations (optional).
2593 */
2594 write_c0_guestctl0(MIPS_GCTL0_CP0 |
2595 (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) |
2596 MIPS_GCTL0_CG | MIPS_GCTL0_CF);
2597 if (cpu_has_guestctl0ext)
2598 set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI);
2599
2600 if (cpu_has_guestid) {
2601 write_c0_guestctl1(0);
2602 kvm_vz_local_flush_roottlb_all_guests();
2603
2604 GUESTID_MASK = current_cpu_data.guestid_mask;
2605 GUESTID_FIRST_VERSION = GUESTID_MASK + 1;
2606 GUESTID_VERSION_MASK = ~GUESTID_MASK;
2607
2608 current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION;
2609 }
2610
2611 /* clear any pending injected virtual guest interrupts */
2612 if (cpu_has_guestctl2)
2613 clear_c0_guestctl2(0x3f << 10);
2614
2615 return 0;
2616}
2617
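/**
 * kvm_vz_hardware_disable() - Stop using the guest TLB on this CPU.
 *
 * Flush the guest TLB, and with GuestIDs also clear GuestCtl1 and flush the
 * root TLB entries belonging to guests.
 */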
2618static void kvm_vz_hardware_disable(void)
2619{
2620 kvm_vz_local_flush_guesttlb_all();
2621
2622 if (cpu_has_guestid) {
2623 write_c0_guestctl1(0);
2624 kvm_vz_local_flush_roottlb_all_guests();
2625 }
2626}
2627
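/**
 * kvm_vz_check_extension() - Report VZ specific capabilities.
 * @kvm:	Virtual machine.
 * @ext:	KVM_CAP_* capability being queried.
 *
 * Returns:	1 for KVM_CAP_MIPS_VZ, 2 for KVM_CAP_MIPS_64BIT on 64-bit
 *		kernels, otherwise 0.
 */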
2628static int kvm_vz_check_extension(struct kvm *kvm, long ext)
2629{
2630 int r;
2631
2632 switch (ext) {
2633 case KVM_CAP_MIPS_VZ:
2634 /* we wouldn't be here unless cpu_has_vz */
2635 r = 1;
2636 break;
2637#ifdef CONFIG_64BIT
2638 case KVM_CAP_MIPS_64BIT:
2639 /* We support 64-bit registers/operations and addresses */
2640 r = 2;
2641 break;
2642#endif
2643 default:
2644 r = 0;
2645 break;
2646 }
2647
2648 return r;
2649}
2650
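/**
 * kvm_vz_vcpu_init() - VZ specific VCPU initialisation.
 * @vcpu:	Virtual CPU.
 *
 * Invalidate the VCPU's GuestID on every physical CPU so that a fresh one is
 * allocated the first time it runs.
 */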
2651static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu)
2652{
2653 int i;
2654
2655 for_each_possible_cpu(i)
2656 vcpu->arch.vzguestid[i] = 0;
2657
2658 return 0;
2659}
2660
2661static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu)
2662{
2663 int cpu;
2664
2665 /*
2666 * If the VCPU is freed and reused as another VCPU, we don't want the
2667 * matching pointer wrongly hanging around in last_vcpu[] or
2668 * last_exec_vcpu[].
2669 */
2670 for_each_possible_cpu(cpu) {
2671 if (last_vcpu[cpu] == vcpu)
2672 last_vcpu[cpu] = NULL;
2673 if (last_exec_vcpu[cpu] == vcpu)
2674 last_exec_vcpu[cpu] = NULL;
2675 }
2676}
2677
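/**
 * kvm_vz_vcpu_setup() - Initialise a VCPU to architectural reset state.
 * @vcpu:	Virtual CPU.
 *
 * Initialise the guest timer and the software copies of the guest CP0
 * registers (Status, IntCtl, PRId, EBase, the Config registers, SegCtl and
 * the hardware page table walker registers where implemented) to reset
 * values, and place the PC at the reset vector.
 */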
2678static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu)
2679{
2680 struct mips_coproc *cop0 = vcpu->arch.cop0;
2681 unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */
2682
2683 /*
2684 * Start off the timer at the same frequency as the host timer, but the
2685 * soft timer doesn't handle frequencies greater than 1GHz yet.
2686 */
2687 if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC)
2688 count_hz = mips_hpt_frequency;
2689 kvm_mips_init_count(vcpu, count_hz);
2690
2691 /*
2692 * Initialize guest register state to valid architectural reset state.
2693 */
2694
2695 /* PageGrain */
2696 if (cpu_has_mips_r6)
2697 kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC);
2698 /* Wired */
2699 if (cpu_has_mips_r6)
2700 kvm_write_sw_gc0_wired(cop0,
2701 read_gc0_wired() & MIPSR6_WIRED_LIMIT);
2702 /* Status */
2703 kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL);
2704 if (cpu_has_mips_r6)
2705 kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status());
2706 /* IntCtl */
2707 kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() &
2708 (INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI));
2709 /* PRId */
2710 kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id);
2711 /* EBase */
2712 kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id);
2713 /* Config */
2714 kvm_save_gc0_config(cop0);
2715 /* architecturally writable (e.g. from guest) */
2716 kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK,
2717 _page_cachable_default >> _CACHE_SHIFT);
2718 /* architecturally read only, but maybe writable from root */
2719 kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config());
2720 if (cpu_guest_has_conf1) {
2721 kvm_set_sw_gc0_config(cop0, MIPS_CONF_M);
2722 /* Config1 */
2723 kvm_save_gc0_config1(cop0);
2724 /* architecturally read only, but maybe writable from root */
2725 kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 |
2726 MIPS_CONF1_MD |
2727 MIPS_CONF1_PC |
2728 MIPS_CONF1_WR |
2729 MIPS_CONF1_CA |
2730 MIPS_CONF1_FP);
2731 }
2732 if (cpu_guest_has_conf2) {
2733 kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M);
2734 /* Config2 */
2735 kvm_save_gc0_config2(cop0);
2736 }
2737 if (cpu_guest_has_conf3) {
2738 kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M);
2739 /* Config3 */
2740 kvm_save_gc0_config3(cop0);
2741 /* architecturally writable (e.g. from guest) */
2742 kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE);
2743 /* architecturally read only, but maybe writable from root */
2744 kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA |
2745 MIPS_CONF3_BPG |
2746 MIPS_CONF3_ULRI |
2747 MIPS_CONF3_DSP |
2748 MIPS_CONF3_CTXTC |
2749 MIPS_CONF3_ITL |
2750 MIPS_CONF3_LPA |
2751 MIPS_CONF3_VEIC |
2752 MIPS_CONF3_VINT |
2753 MIPS_CONF3_SP |
2754 MIPS_CONF3_CDMM |
2755 MIPS_CONF3_MT |
2756 MIPS_CONF3_SM |
2757 MIPS_CONF3_TL);
2758 }
2759 if (cpu_guest_has_conf4) {
2760 kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M);
2761 /* Config4 */
2762 kvm_save_gc0_config4(cop0);
2763 }
2764 if (cpu_guest_has_conf5) {
2765 kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M);
2766 /* Config5 */
2767 kvm_save_gc0_config5(cop0);
2768 /* architecturally writable (e.g. from guest) */
2769 kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K |
2770 MIPS_CONF5_CV |
2771 MIPS_CONF5_MSAEN |
2772 MIPS_CONF5_UFE |
2773 MIPS_CONF5_FRE |
2774 MIPS_CONF5_SBRI |
2775 MIPS_CONF5_UFR);
2776 /* architecturally read only, but maybe writable from root */
2777 kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP);
2778 }
2779
James Hogandffe0422017-03-14 10:15:34 +00002780 if (cpu_guest_has_contextconfig) {
2781 /* ContextConfig */
2782 kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0);
2783#ifdef CONFIG_64BIT
2784 /* XContextConfig */
2785 /* bits SEGBITS-13+3:4 set */
2786 kvm_write_sw_gc0_xcontextconfig(cop0,
2787 ((1ull << (cpu_vmbits - 13)) - 1) << 4);
2788#endif
2789 }
2790
James Hogan4b7de022017-03-14 10:15:35 +00002791 /* Implementation dependent, use the legacy layout */
2792 if (cpu_guest_has_segments) {
2793 /* SegCtl0, SegCtl1, SegCtl2 */
2794 kvm_write_sw_gc0_segctl0(cop0, 0x00200010);
2795 kvm_write_sw_gc0_segctl1(cop0, 0x00000002 |
2796 (_page_cachable_default >> _CACHE_SHIFT) <<
2797 (16 + MIPS_SEGCFG_C_SHIFT));
2798 kvm_write_sw_gc0_segctl2(cop0, 0x00380438);
2799 }
2800
James Hogan5a2f3522017-03-14 10:15:36 +00002801 /* reset HTW registers */
2802 if (cpu_guest_has_htw && cpu_has_mips_r6) {
2803 /* PWField */
2804 kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302);
2805 /* PWSize */
2806 kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT);
2807 }
2808
James Hoganc992a4f2017-03-14 10:15:31 +00002809 /* start with no pending virtual guest interrupts */
2810 if (cpu_has_guestctl2)
2811 cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0;
2812
2813 /* Put PC at reset vector */
2814 vcpu->arch.pc = CKSEG1ADDR(0x1fc00000);
2815
2816 return 0;
2817}
2818
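/**
 * kvm_vz_flush_shadow_all() - Invalidate all guest mappings for a VM.
 * @kvm:	Virtual machine.
 *
 * With GuestIDs, the TLB flush request makes each VCPU drop its GuestIDs.
 * Without GuestIDs, every CPU is marked in asid_flush_mask so a new root ASID
 * is allocated for the GPA mappings. Running VCPUs are kicked either way.
 */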
2819static void kvm_vz_flush_shadow_all(struct kvm *kvm)
2820{
2821 if (cpu_has_guestid) {
2822 /* Flush GuestID for each VCPU individually */
2823 kvm_flush_remote_tlbs(kvm);
2824 } else {
2825 /*
2826 * For each CPU there is a single GPA ASID used by all VCPUs in
2827 * the VM, so it doesn't make sense for the VCPUs to handle
2828 * invalidation of these ASIDs individually.
2829 *
2830 * Instead mark all CPUs as needing ASID invalidation in
2831 * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to
2832 * kick any running VCPUs so they check asid_flush_mask.
2833 */
2834 cpumask_setall(&kvm->arch.asid_flush_mask);
2835 kvm_flush_remote_tlbs(kvm);
2836 }
2837}
2838
2839static void kvm_vz_flush_shadow_memslot(struct kvm *kvm,
2840 const struct kvm_memory_slot *slot)
2841{
2842 kvm_vz_flush_shadow_all(kvm);
2843}
2844
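/**
 * kvm_vz_vcpu_reenter() - Prepare to re-enter the guest after an exit.
 * @run:	kvm_run structure.
 * @vcpu:	Virtual CPU.
 *
 * Handle pending requests, saving and restoring the wired guest TLB entries
 * around the TLB reload if the requests may clobber the guest TLB.
 */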
2845static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu)
2846{
2847 int cpu = smp_processor_id();
2848 int preserve_guest_tlb;
2849
2850 preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu);
2851
2852 if (preserve_guest_tlb)
2853 kvm_vz_vcpu_save_wired(vcpu);
2854
2855 kvm_vz_vcpu_load_tlb(vcpu, cpu);
2856
2857 if (preserve_guest_tlb)
2858 kvm_vz_vcpu_load_wired(vcpu);
2859}
2860
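/**
 * kvm_vz_vcpu_run() - Enter the guest and run until it exits to the host.
 * @run:	kvm_run structure.
 * @vcpu:	Virtual CPU.
 *
 * Deliver any pending guest interrupts, load the TLB state and wired entries,
 * enter the guest via vcpu->arch.vcpu_run(), and save the wired guest TLB
 * entries again on exit.
 *
 * Returns:	The value returned by vcpu->arch.vcpu_run().
 */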
2861static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
2862{
2863 int cpu = smp_processor_id();
2864 int r;
2865
2866 /* Check if we have any exceptions/interrupts pending */
2867 kvm_mips_deliver_interrupts(vcpu, read_gc0_cause());
2868
2869 kvm_vz_check_requests(vcpu, cpu);
2870 kvm_vz_vcpu_load_tlb(vcpu, cpu);
2871 kvm_vz_vcpu_load_wired(vcpu);
2872
2873 r = vcpu->arch.vcpu_run(run, vcpu);
2874
2875 kvm_vz_vcpu_save_wired(vcpu);
2876
2877 return r;
2878}
2879
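/* Callbacks wiring the VZ implementation into the generic MIPS KVM code */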
2880static struct kvm_mips_callbacks kvm_vz_callbacks = {
2881 .handle_cop_unusable = kvm_trap_vz_handle_cop_unusable,
2882 .handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss,
2883 .handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss,
2884 .handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss,
2885 .handle_addr_err_st = kvm_trap_vz_no_handler,
2886 .handle_addr_err_ld = kvm_trap_vz_no_handler,
2887 .handle_syscall = kvm_trap_vz_no_handler,
2888 .handle_res_inst = kvm_trap_vz_no_handler,
2889 .handle_break = kvm_trap_vz_no_handler,
2890 .handle_msa_disabled = kvm_trap_vz_handle_msa_disabled,
2891 .handle_guest_exit = kvm_trap_vz_handle_guest_exit,
2892
2893 .hardware_enable = kvm_vz_hardware_enable,
2894 .hardware_disable = kvm_vz_hardware_disable,
2895 .check_extension = kvm_vz_check_extension,
2896 .vcpu_init = kvm_vz_vcpu_init,
2897 .vcpu_uninit = kvm_vz_vcpu_uninit,
2898 .vcpu_setup = kvm_vz_vcpu_setup,
2899 .flush_shadow_all = kvm_vz_flush_shadow_all,
2900 .flush_shadow_memslot = kvm_vz_flush_shadow_memslot,
2901 .gva_to_gpa = kvm_vz_gva_to_gpa_cb,
2902 .queue_timer_int = kvm_vz_queue_timer_int_cb,
2903 .dequeue_timer_int = kvm_vz_dequeue_timer_int_cb,
2904 .queue_io_int = kvm_vz_queue_io_int_cb,
2905 .dequeue_io_int = kvm_vz_dequeue_io_int_cb,
2906 .irq_deliver = kvm_vz_irq_deliver_cb,
2907 .irq_clear = kvm_vz_irq_clear_cb,
2908 .num_regs = kvm_vz_num_regs,
2909 .copy_reg_indices = kvm_vz_copy_reg_indices,
2910 .get_one_reg = kvm_vz_get_one_reg,
2911 .set_one_reg = kvm_vz_set_one_reg,
2912 .vcpu_load = kvm_vz_vcpu_load,
2913 .vcpu_put = kvm_vz_vcpu_put,
2914 .vcpu_run = kvm_vz_vcpu_run,
2915 .vcpu_reenter = kvm_vz_vcpu_reenter,
2916};
2917
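/**
 * kvm_mips_emulation_init() - Install the VZ implementation callbacks.
 * @install_callbacks:	Filled with a pointer to &kvm_vz_callbacks on success.
 *
 * Returns:	0 on success, -ENODEV if the CPU lacks VZ support or no KScratch
 *		register could be allocated for the page directory pointer.
 */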
2918int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
2919{
2920 if (!cpu_has_vz)
2921 return -ENODEV;
2922
2923 /*
2924 * VZ requires at least 2 KScratch registers, so it should have been
2925 * possible to allocate pgd_reg.
2926 */
2927 if (WARN(pgd_reg == -1,
2928 "pgd_reg not allocated even though cpu_has_vz\n"))
2929 return -ENODEV;
2930
2931 pr_info("Starting KVM with MIPS VZ extensions\n");
2932
2933 *install_callbacks = &kvm_vz_callbacks;
2934 return 0;
2935}