/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Support for hardware virtualization extensions
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Yann Le Du <ledu@kymasys.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/module.h>
#include <linux/preempt.h>
#include <linux/vmalloc.h>
#include <asm/cacheflush.h>
#include <asm/cacheops.h>
#include <asm/cmpxchg.h>
#include <asm/fpu.h>
#include <asm/hazards.h>
#include <asm/inst.h>
#include <asm/mmu_context.h>
#include <asm/r4kcache.h>
#include <asm/time.h>
#include <asm/tlb.h>
#include <asm/tlbex.h>

#include <linux/kvm_host.h>

#include "interrupt.h"

#include "trace.h"

/* Pointers to last VCPU loaded on each physical CPU */
static struct kvm_vcpu *last_vcpu[NR_CPUS];
/* Pointers to last VCPU executed on each physical CPU */
static struct kvm_vcpu *last_exec_vcpu[NR_CPUS];

/*
 * Number of guest VTLB entries to use, so we can catch inconsistency between
 * CPUs.
 */
static unsigned int kvm_vz_guest_vtlb_size;

static inline long kvm_vz_read_gc0_ebase(void)
{
	if (sizeof(long) == 8 && cpu_has_ebase_wg)
		return read_gc0_ebase_64();
	else
		return read_gc0_ebase();
}

static inline void kvm_vz_write_gc0_ebase(long v)
{
	/*
	 * First write with WG=1 to write upper bits, then write again in case
	 * WG should be left at 0.
	 * write_gc0_ebase_64() is no longer UNDEFINED since R6.
	 */
	if (sizeof(long) == 8 &&
	    (cpu_has_mips64r6 || cpu_has_ebase_wg)) {
		write_gc0_ebase_64(v | MIPS_EBASE_WG);
		write_gc0_ebase_64(v);
	} else {
		write_gc0_ebase(v | MIPS_EBASE_WG);
		write_gc0_ebase(v);
	}
}
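
/*
 * For example, with v = 0xffffffff80000000 and WG initially clear: the
 * first write has WG set, unlocking the write-gated upper EBase bits so
 * they take the value from v; the second write then stores v with WG
 * clear, leaving EBase as requested and WG back at 0.
 */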

/*
 * These Config bits may be writable by the guest:
 * Config:	[K23, KU] (!TLB), K0
 * Config1:	(none)
 * Config2:	[TU, SU] (impl)
 * Config3:	ISAOnExc
 * Config4:	FTLBPageSize
 * Config5:	K, CV, MSAEn, UFE, FRE, SBRI, UFR
 */

static inline unsigned int kvm_vz_config_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return CONF_CM_CMASK;
}

static inline unsigned int kvm_vz_config1_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config2_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return 0;
}

static inline unsigned int kvm_vz_config3_guest_wrmask(struct kvm_vcpu *vcpu)
{
	return MIPS_CONF3_ISA_OE;
}

static inline unsigned int kvm_vz_config4_guest_wrmask(struct kvm_vcpu *vcpu)
{
	/* no need to be exact */
	return MIPS_CONF4_VFTLBPAGESIZE;
}

static inline unsigned int kvm_vz_config5_guest_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = MIPS_CONF5_K | MIPS_CONF5_CV | MIPS_CONF5_SBRI;

	/* Permit MSAEn changes if MSA supported and enabled */
	if (kvm_mips_guest_has_msa(&vcpu->arch))
		mask |= MIPS_CONF5_MSAEN;

	/*
	 * Permit guest FPU mode changes if FPU is enabled and the relevant
	 * feature exists according to FIR register.
	 */
	if (kvm_mips_guest_has_fpu(&vcpu->arch)) {
		if (cpu_has_ufr)
			mask |= MIPS_CONF5_UFR;
		if (cpu_has_fre)
			mask |= MIPS_CONF5_FRE | MIPS_CONF5_UFE;
	}

	return mask;
}
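
/*
 * These guest write masks gate guest-initiated Config writes that trap to
 * root: a typical consumer (see the Config5 case in
 * kvm_trap_vz_handle_gsfc() below) applies them as
 *
 *	change = (old ^ val) & kvm_vz_config5_guest_wrmask(vcpu);
 *	write_gc0_config5(old ^ change);
 *
 * so only the bits listed above can ever change under guest control.
 */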

/*
 * VZ optionally allows these additional Config bits to be written by root:
 * Config:	M, [MT]
 * Config1:	M, [MMUSize-1, C2, MD, PC, WR, CA], FP
 * Config2:	M
 * Config3:	M, MSAP, [BPG], ULRI, [DSP2P, DSPP], CTXTC, [ITL, LPA, VEIC,
 *		VInt, SP, CDMM, MT, SM, TL]
 * Config4:	M, [VTLBSizeExt, MMUSizeExt]
 * Config5:	MRP
 */

static inline unsigned int kvm_vz_config_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config1_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config1_guest_wrmask(vcpu) | MIPS_CONF_M;

	/* Permit FPU to be present if FPU is supported */
	if (kvm_mips_guest_can_have_fpu(&vcpu->arch))
		mask |= MIPS_CONF1_FP;

	return mask;
}

static inline unsigned int kvm_vz_config2_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config2_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config3_user_wrmask(struct kvm_vcpu *vcpu)
{
	unsigned int mask = kvm_vz_config3_guest_wrmask(vcpu) | MIPS_CONF_M |
		MIPS_CONF3_ULRI | MIPS_CONF3_CTXTC;

	/* Permit MSA to be present if MSA is supported */
	if (kvm_mips_guest_can_have_msa(&vcpu->arch))
		mask |= MIPS_CONF3_MSA;

	return mask;
}

static inline unsigned int kvm_vz_config4_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config4_guest_wrmask(vcpu) | MIPS_CONF_M;
}

static inline unsigned int kvm_vz_config5_user_wrmask(struct kvm_vcpu *vcpu)
{
	return kvm_vz_config5_guest_wrmask(vcpu) | MIPS_CONF5_MRP;
}
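
/*
 * The *_user_wrmask() variants are supersets of the guest masks, adding
 * presence bits such as Config.M. They are presumably what the one_reg
 * (KVM_SET_ONE_REG) path checks against when userspace writes these
 * registers; that consumer sits outside this excerpt.
 */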

static gpa_t kvm_vz_gva_to_gpa_cb(gva_t gva)
{
	/* VZ guest has already converted gva to gpa */
	return gva;
}

static void kvm_vz_queue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	set_bit(priority, &vcpu->arch.pending_exceptions);
	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_dequeue_irq(struct kvm_vcpu *vcpu, unsigned int priority)
{
	clear_bit(priority, &vcpu->arch.pending_exceptions);
	set_bit(priority, &vcpu->arch.pending_exceptions_clr);
}

static void kvm_vz_queue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * Timer expiry is asynchronous to vcpu execution, therefore defer
	 * guest CP0 accesses.
	 */
	kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_dequeue_timer_int_cb(struct kvm_vcpu *vcpu)
{
	/*
	 * Timer expiry is asynchronous to vcpu execution, therefore defer
	 * guest CP0 accesses.
	 */
	kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_TIMER);
}

static void kvm_vz_queue_io_int_cb(struct kvm_vcpu *vcpu,
				   struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * Interrupts are asynchronous to vcpu execution, therefore defer
	 * guest CP0 accesses.
	 */
	switch (intr) {
	case 2:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case 3:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case 4:
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

static void kvm_vz_dequeue_io_int_cb(struct kvm_vcpu *vcpu,
				     struct kvm_mips_interrupt *irq)
{
	int intr = (int)irq->irq;

	/*
	 * Interrupts are asynchronous to vcpu execution, therefore defer
	 * guest CP0 accesses.
	 */
	switch (intr) {
	case -2:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IO);
		break;

	case -3:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_1);
		break;

	case -4:
		kvm_vz_dequeue_irq(vcpu, MIPS_EXC_INT_IPI_2);
		break;

	default:
		break;
	}
}

static u32 kvm_vz_priority_to_irq[MIPS_EXC_MAX] = {
	[MIPS_EXC_INT_TIMER] = C_IRQ5,
	[MIPS_EXC_INT_IO]    = C_IRQ0,
	[MIPS_EXC_INT_IPI_1] = C_IRQ1,
	[MIPS_EXC_INT_IPI_2] = C_IRQ2,
};
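
/*
 * These C_IRQn constants are Cause.IP bit masks: C_IRQ0 is hardware
 * interrupt 0 (Cause bit 10, IP2), C_IRQ1/C_IRQ2 follow at bits 11/12,
 * and C_IRQ5 (bit 15, IP7) is conventionally the timer. The table thus
 * maps a KVM interrupt priority straight onto the bit to assert in the
 * guest's Cause.IP, or the matching GuestCtl2 virtual interrupt.
 */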

static int kvm_vz_irq_deliver_cb(struct kvm_vcpu *vcpu, unsigned int priority,
				 u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		set_gc0_cause(C_TI);
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		if (cpu_has_guestctl2)
			set_c0_guestctl2(irq);
		else
			set_gc0_cause(irq);
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions);
	return 1;
}

static int kvm_vz_irq_clear_cb(struct kvm_vcpu *vcpu, unsigned int priority,
			       u32 cause)
{
	u32 irq = (priority < MIPS_EXC_MAX) ?
		kvm_vz_priority_to_irq[priority] : 0;

	switch (priority) {
	case MIPS_EXC_INT_TIMER:
		/*
		 * A call to kvm_write_c0_guest_compare() clears Cause.TI in
		 * kvm_mips_emulate_CP0(). Explicitly clear the irq associated
		 * with Cause.IP[IPTI] if the GuestCtl2 virtual interrupt
		 * register is not supported or if not using GuestCtl2
		 * Hardware Clear.
		 */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	case MIPS_EXC_INT_IO:
	case MIPS_EXC_INT_IPI_1:
	case MIPS_EXC_INT_IPI_2:
		/* Clear GuestCtl2.VIP irq if not using Hardware Clear */
		if (cpu_has_guestctl2) {
			if (!(read_c0_guestctl2() & (irq << 14)))
				clear_c0_guestctl2(irq);
		} else {
			clear_gc0_cause(irq);
		}
		break;

	default:
		break;
	}

	clear_bit(priority, &vcpu->arch.pending_exceptions_clr);
	return 1;
}
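
/*
 * A note on the (irq << 14) test above: irq is a Cause.IP mask in bits
 * 15:10, so shifting left by 14 lines it up with the corresponding
 * GuestCtl2 Hardware Clear capability bit (e.g. C_IRQ5 at bit 15 lands on
 * bit 29). When that bit is set the hardware deasserts the virtual
 * interrupt itself; only otherwise do we clear GuestCtl2.VIP by hand.
 */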

/*
 * VZ guest timer handling.
 */

/**
 * kvm_vz_should_use_htimer() - Find whether to use the VZ hard guest timer.
 * @vcpu:	Virtual CPU.
 *
 * Returns:	true if the VZ GTOffset & real guest CP0_Count should be used
 *		instead of software emulation of guest timer.
 *		false otherwise.
 */
static bool kvm_vz_should_use_htimer(struct kvm_vcpu *vcpu)
{
	if (kvm_mips_count_disabled(vcpu))
		return false;

	/* Chosen frequency must match real frequency */
	if (mips_hpt_frequency != vcpu->arch.count_hz)
		return false;

	/* We don't support a CP0_GTOffset with fewer bits than CP0_Count */
	if (current_cpu_data.gtoffset_mask != 0xffffffff)
		return false;

	return true;
}

/**
 * _kvm_vz_restore_stimer() - Restore soft timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore VZ state relating to the soft timer. The hard timer can be enabled
 * later.
 */
static void _kvm_vz_restore_stimer(struct kvm_vcpu *vcpu, u32 compare,
				   u32 cause)
{
	/*
	 * Avoid spurious counter interrupts by setting Guest CP0_Count to just
	 * after Guest CP0_Compare.
	 */
	write_c0_gtoffset(compare - read_c0_count());

	back_to_back_c0_hazard();
	write_gc0_cause(cause);
}
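
/*
 * Guest CP0_Count reads as root CP0_Count + CP0_GTOffset, so the GTOffset
 * written above places guest Count right at Compare and ticking past it.
 * For example, with root Count = 0x1000 and compare = 0x5000, GTOffset
 * becomes 0x4000 and the guest sees Count ~= 0x5000; the next
 * Count == Compare match is then a whole 2^32 ticks away, so a stale
 * Compare can't fire immediately.
 */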

/**
 * _kvm_vz_restore_htimer() - Restore hard timer state.
 * @vcpu:	Virtual CPU.
 * @compare:	CP0_Compare register value, restored by caller.
 * @cause:	CP0_Cause register to restore.
 *
 * Restore hard timer Guest.Count & Guest.Cause taking care to preserve the
 * value of Guest.CP0_Cause.TI while restoring Guest.CP0_Cause.
 */
static void _kvm_vz_restore_htimer(struct kvm_vcpu *vcpu,
				   u32 compare, u32 cause)
{
	u32 start_count, after_count;
	ktime_t freeze_time;
	unsigned long flags;

	/*
	 * Freeze the soft-timer and sync the guest CP0_Count with it. We do
	 * this with interrupts disabled to avoid latency.
	 */
	local_irq_save(flags);
	freeze_time = kvm_mips_freeze_hrtimer(vcpu, &start_count);
	write_c0_gtoffset(start_count - read_c0_count());
	local_irq_restore(flags);

	/* restore guest CP0_Cause, as TI may already be set */
	back_to_back_c0_hazard();
	write_gc0_cause(cause);

	/*
	 * The above sequence isn't atomic and would result in lost timer
	 * interrupts if we're not careful. Detect if a timer interrupt is due
	 * and assert it.
	 */
	back_to_back_c0_hazard();
	after_count = read_gc0_count();
	if (after_count - start_count > compare - start_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);
}
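
/*
 * The overflow check above is a modulo-2^32 interval test: it asks whether
 * Compare fell inside (start_count, after_count], even across a Count
 * wraparound. For example, start_count = 0xfffffff0, compare = 0x10,
 * after_count = 0x20: after_count - start_count = 0x30 exceeds
 * compare - start_count - 1 = 0x1f, so a timer interrupt became due during
 * the restore and is queued by hand. _kvm_vz_save_htimer() below uses the
 * same test.
 */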

/**
 * kvm_vz_restore_timer() - Restore timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore soft timer state from saved context.
 */
static void kvm_vz_restore_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 cause, compare;

	compare = kvm_read_sw_gc0_compare(cop0);
	cause = kvm_read_sw_gc0_cause(cop0);

	write_gc0_compare(compare);
	_kvm_vz_restore_stimer(vcpu, compare, cause);
}

/**
 * kvm_vz_acquire_htimer() - Switch to hard timer state.
 * @vcpu:	Virtual CPU.
 *
 * Restore hard timer state on top of existing soft timer state if possible.
 *
 * Since the hard timer won't remain active over preemption, preemption
 * should be disabled by the caller.
 */
void kvm_vz_acquire_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0;

	gctl0 = read_c0_guestctl0();
	if (!(gctl0 & MIPS_GCTL0_GT) && kvm_vz_should_use_htimer(vcpu)) {
		/* enable guest access to hard timer */
		write_c0_guestctl0(gctl0 | MIPS_GCTL0_GT);

		_kvm_vz_restore_htimer(vcpu, read_gc0_compare(),
				       read_gc0_cause());
	}
}

/**
 * _kvm_vz_save_htimer() - Switch to software emulation of guest timer.
 * @vcpu:	Virtual CPU.
 * @out_compare: Pointer to write compare value to.
 * @out_cause:	Pointer to write cause value to.
 *
 * Save VZ guest timer state and switch to software emulation of guest CP0
 * timer. The hard timer must already be in use, so preemption should be
 * disabled.
 */
static void _kvm_vz_save_htimer(struct kvm_vcpu *vcpu,
				u32 *out_compare, u32 *out_cause)
{
	u32 cause, compare, before_count, end_count;
	ktime_t before_time;

	compare = read_gc0_compare();
	*out_compare = compare;

	before_time = ktime_get();

	/*
	 * Record the CP0_Count *prior* to saving CP0_Cause, so we have a time
	 * at which no pending timer interrupt is missing.
	 */
	before_count = read_gc0_count();
	back_to_back_c0_hazard();
	cause = read_gc0_cause();
	*out_cause = cause;

	/*
	 * Record a final CP0_Count which we will transfer to the soft-timer.
	 * This is recorded *after* saving CP0_Cause, so we don't get any timer
	 * interrupts from just after the final CP0_Count point.
	 */
	back_to_back_c0_hazard();
	end_count = read_gc0_count();

	/*
	 * The above sequence isn't atomic, so we could miss a timer interrupt
	 * between reading CP0_Cause and end_count. Detect and record any timer
	 * interrupt due between before_count and end_count.
	 */
	if (end_count - before_count > compare - before_count - 1)
		kvm_vz_queue_irq(vcpu, MIPS_EXC_INT_TIMER);

	/*
	 * Restore soft-timer, ignoring a small amount of negative drift due to
	 * delay between freeze_hrtimer and setting CP0_GTOffset.
	 */
	kvm_mips_restore_hrtimer(vcpu, before_time, end_count, -0x10000);
}

/**
 * kvm_vz_save_timer() - Save guest timer state.
 * @vcpu:	Virtual CPU.
 *
 * Save VZ guest timer state and switch to the soft guest timer if the hard
 * timer was in use.
 */
static void kvm_vz_save_timer(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 gctl0, compare, cause;

	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of hard timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* save hard timer state */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);
	} else {
		compare = read_gc0_compare();
		cause = read_gc0_cause();
	}

	/* save timer-related state to VCPU context */
	kvm_write_sw_gc0_cause(cop0, cause);
	kvm_write_sw_gc0_compare(cop0, compare);
}

/**
 * kvm_vz_lose_htimer() - Ensure hard guest timer is not in use.
 * @vcpu:	Virtual CPU.
 *
 * Transfers the state of the hard guest timer to the soft guest timer, leaving
 * guest state intact so it can continue to be used with the soft timer.
 */
void kvm_vz_lose_htimer(struct kvm_vcpu *vcpu)
{
	u32 gctl0, compare, cause;

	preempt_disable();
	gctl0 = read_c0_guestctl0();
	if (gctl0 & MIPS_GCTL0_GT) {
		/* disable guest use of timer */
		write_c0_guestctl0(gctl0 & ~MIPS_GCTL0_GT);

		/* switch to soft timer */
		_kvm_vz_save_htimer(vcpu, &compare, &cause);

		/* leave soft timer in usable state */
		_kvm_vz_restore_stimer(vcpu, compare, cause);
	}
	preempt_enable();
}

/**
 * is_eva_access() - Find whether an instruction is an EVA memory accessor.
 * @inst:	32-bit instruction encoding.
 *
 * Finds whether @inst encodes an EVA memory access instruction, which would
 * indicate that emulation of it should access the user mode address space
 * instead of the kernel mode address space. This matters for MUSUK segments
 * which are TLB mapped for user mode but unmapped for kernel mode.
 *
 * Returns:	Whether @inst encodes an EVA accessor instruction.
 */
static bool is_eva_access(union mips_instruction inst)
{
	if (inst.spec3_format.opcode != spec3_op)
		return false;

	switch (inst.spec3_format.func) {
	case lwle_op:
	case lwre_op:
	case cachee_op:
	case sbe_op:
	case she_op:
	case sce_op:
	case swe_op:
	case swle_op:
	case swre_op:
	case prefe_op:
	case lbue_op:
	case lhue_op:
	case lbe_op:
	case lhe_op:
	case lle_op:
	case lwe_op:
		return true;
	default:
		return false;
	}
}

/**
 * is_eva_am_mapped() - Find whether an access mode is mapped.
 * @vcpu:	KVM VCPU state.
 * @am:		3-bit encoded access mode.
 * @eu:		Segment becomes unmapped and uncached when Status.ERL=1.
 *
 * Decode @am to find whether it encodes a mapped segment for the current VCPU
 * state. Where necessary @eu and the actual instruction causing the fault are
 * taken into account to make the decision.
 *
 * Returns:	Whether the VCPU faulted on a TLB mapped address.
 */
static bool is_eva_am_mapped(struct kvm_vcpu *vcpu, unsigned int am, bool eu)
{
	u32 am_lookup;
	int err;

	/*
	 * Interpret access control mode. We assume address errors will already
	 * have been caught by the guest, leaving us with:
	 *       AM     UM  SM  KM  31..24 23..16
	 * UK    0 000          Unm   0      0
	 * MK    1 001          TLB   1
	 * MSK   2 010      TLB TLB   1
	 * MUSK  3 011  TLB TLB TLB   1
	 * MUSUK 4 100  TLB TLB Unm   0      1
	 * USK   5 101      Unm Unm   0      0
	 * -     6 110                0      0
	 * UUSK  7 111  Unm Unm Unm   0      0
	 *
	 * We shift a magic value by AM across the sign bit to find if always
	 * TLB mapped, and if not shift by 8 again to find if it depends on KM.
	 */
	am_lookup = 0x70080000 << am;
	if ((s32)am_lookup < 0) {
		/*
		 * MK, MSK, MUSK
		 * Always TLB mapped, unless SegCtl.EU && ERL
		 */
		if (!eu || !(read_gc0_status() & ST0_ERL))
			return true;
	} else {
		am_lookup <<= 8;
		if ((s32)am_lookup < 0) {
			union mips_instruction inst;
			unsigned int status;
			u32 *opc;

			/*
			 * MUSUK
			 * TLB mapped if not in kernel mode
			 */
			status = read_gc0_status();
			if (!(status & (ST0_EXL | ST0_ERL)) &&
			    (status & ST0_KSU))
				return true;
			/*
			 * EVA access instructions in kernel
			 * mode access user address space.
			 */
			opc = (u32 *)vcpu->arch.pc;
			if (vcpu->arch.host_cp0_cause & CAUSEF_BD)
				opc += 1;
			err = kvm_get_badinstr(opc, vcpu, &inst.word);
			if (!err && is_eva_access(inst))
				return true;
		}
	}

	return false;
}
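
/*
 * Worked example of the magic lookup above, for am = 4 (MUSUK):
 * 0x70080000 << 4 = 0x00800000, sign bit clear, so not unconditionally
 * mapped; shifting a further 8 gives 0x80000000, sign bit set, so the
 * answer depends on kernel mode. The constant simply packs the "31..24"
 * and "23..16" columns of the table in is_eva_am_mapped() into one word,
 * making both tests a shift and a sign check.
 */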

/**
 * kvm_vz_gva_to_gpa() - Convert valid GVA to GPA.
 * @vcpu:	KVM VCPU state.
 * @gva:	Guest virtual address to convert.
 * @gpa:	Output guest physical address.
 *
 * Convert a guest virtual address (GVA) which is valid according to the guest
 * context, to a guest physical address (GPA).
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_gva_to_gpa(struct kvm_vcpu *vcpu, unsigned long gva,
			     unsigned long *gpa)
{
	u32 gva32 = gva;
	unsigned long segctl;

	if ((long)gva == (s32)gva32) {
		/* Handle canonical 32-bit virtual address */
		if (cpu_guest_has_segments) {
			unsigned long mask, pa;

			switch (gva32 >> 29) {
			case 0:
			case 1: /* CFG5 (1GB) */
				segctl = read_gc0_segctl2() >> 16;
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 2:
			case 3: /* CFG4 (1GB) */
				segctl = read_gc0_segctl2();
				mask = (unsigned long)0xfc0000000ull;
				break;
			case 4: /* CFG3 (512MB) */
				segctl = read_gc0_segctl1() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 5: /* CFG2 (512MB) */
				segctl = read_gc0_segctl1();
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 6: /* CFG1 (512MB) */
				segctl = read_gc0_segctl0() >> 16;
				mask = (unsigned long)0xfe0000000ull;
				break;
			case 7: /* CFG0 (512MB) */
				segctl = read_gc0_segctl0();
				mask = (unsigned long)0xfe0000000ull;
				break;
			default:
				/*
				 * GCC 4.9 isn't smart enough to figure out
				 * that segctl and mask are always initialised.
				 */
				unreachable();
			}

			if (is_eva_am_mapped(vcpu, (segctl >> 4) & 0x7,
					     segctl & 0x0008))
				goto tlb_mapped;

			/* Unmapped, find guest physical address */
			pa = (segctl << 20) & mask;
			pa |= gva32 & ~mask;
			*gpa = pa;
			return 0;
		} else if ((s32)gva32 < (s32)0xc0000000) {
			/* legacy unmapped KSeg0 or KSeg1 */
			*gpa = gva32 & 0x1fffffff;
			return 0;
		}
#ifdef CONFIG_64BIT
	} else if ((gva & 0xc000000000000000) == 0x8000000000000000) {
		/* XKPHYS */
		if (cpu_guest_has_segments) {
			/*
			 * Each of the 8 regions can be overridden by SegCtl2.XR
			 * to use SegCtl1.XAM.
			 */
			segctl = read_gc0_segctl2();
			if (segctl & (1ull << (56 + ((gva >> 59) & 0x7)))) {
				segctl = read_gc0_segctl1();
				if (is_eva_am_mapped(vcpu, (segctl >> 59) & 0x7,
						     0))
					goto tlb_mapped;
			}
		}
		/*
		 * Traditionally fully unmapped.
		 * Bits 61:59 specify the CCA, which we can just mask off here.
		 * Bits 58:PABITS should be zero, but we shouldn't have got here
		 * if it wasn't.
		 */
		*gpa = gva & 0x07ffffffffffffff;
		return 0;
#endif
	}

tlb_mapped:
	return kvm_vz_guest_tlb_lookup(vcpu, gva, gpa);
}
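
/*
 * Sanity example for the legacy unmapped path: KSeg0 address 0x80001000
 * masks down (& 0x1fffffff) to GPA 0x00001000, and its KSeg1 alias
 * 0xa0001000 yields the same GPA, the two differing only in cacheability.
 */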

/**
 * kvm_vz_badvaddr_to_gpa() - Convert GVA BadVAddr from root exception to GPA.
 * @vcpu:	KVM VCPU state.
 * @badvaddr:	Root BadVAddr.
 * @gpa:	Output guest physical address.
 *
 * VZ implementations are permitted to report guest virtual addresses (GVA) in
 * BadVAddr on a root exception during guest execution, instead of the more
 * convenient guest physical addresses (GPA). When we get a GVA, this function
 * converts it to a GPA, taking into account guest segmentation and guest TLB
 * state.
 *
 * Returns:	0 on success.
 *		-errno on failure.
 */
static int kvm_vz_badvaddr_to_gpa(struct kvm_vcpu *vcpu, unsigned long badvaddr,
				  unsigned long *gpa)
{
	unsigned int gexccode = (vcpu->arch.host_cp0_guestctl0 &
				 MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;

	/* If BadVAddr is GPA, then all is well in the world */
	if (likely(gexccode == MIPS_GCTL0_GEXC_GPA)) {
		*gpa = badvaddr;
		return 0;
	}

	/* Otherwise we'd expect it to be GVA ... */
	if (WARN(gexccode != MIPS_GCTL0_GEXC_GVA,
		 "Unexpected gexccode %#x\n", gexccode))
		return -EINVAL;

	/* ... and we need to perform the GVA->GPA translation in software */
	return kvm_vz_gva_to_gpa(vcpu, badvaddr, gpa);
}

static int kvm_trap_vz_no_handler(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	u32 exccode = (cause & CAUSEF_EXCCODE) >> CAUSEB_EXCCODE;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 inst = 0;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Exception Code: %d not handled @ PC: %p, inst: 0x%08x BadVaddr: %#lx Status: %#x\n",
		exccode, opc, inst, badvaddr,
		read_gc0_status());
	kvm_arch_vcpu_dump_regs(vcpu);
	vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
	return RESUME_HOST;
}

static unsigned long mips_process_maar(unsigned int op, unsigned long val)
{
	/* Mask off unused bits */
	unsigned long mask = 0xfffff000 | MIPS_MAAR_S | MIPS_MAAR_VL;

	if (read_gc0_pagegrain() & PG_ELPA)
		mask |= 0x00ffffff00000000ull;
	if (cpu_guest_has_mvh)
		mask |= MIPS_MAAR_VH;

	/* Set or clear VH */
	if (op == mtc_op) {
		/* clear VH */
		val &= ~MIPS_MAAR_VH;
	} else if (op == dmtc_op) {
		/* set VH to match VL */
		val &= ~MIPS_MAAR_VH;
		if (val & MIPS_MAAR_VL)
			val |= MIPS_MAAR_VH;
	}

	return val & mask;
}
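
/*
 * The VH handling above reflects that a 32-bit write cannot see the high
 * word of a MAAR: mtc_op (32-bit MTC0) always leaves VH clear, while
 * dmtc_op (64-bit DMTC0) copies VL into VH so the valid bits of both
 * halves stay consistent. E.g. a DMTC0 of a value with VL set stores
 * VH|VL; the same value via MTC0 keeps only VL.
 */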

static void kvm_write_maari(struct kvm_vcpu *vcpu, unsigned long val)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	val &= MIPS_MAARI_INDEX;
	if (val == MIPS_MAARI_INDEX)
		kvm_write_sw_gc0_maari(cop0, ARRAY_SIZE(vcpu->arch.maar) - 1);
	else if (val < ARRAY_SIZE(vcpu->arch.maar))
		kvm_write_sw_gc0_maari(cop0, val);
}

static enum emulation_result kvm_vz_gpsi_cop0(union mips_instruction inst,
					      u32 *opc, u32 cause,
					      struct kvm_run *run,
					      struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	enum emulation_result er = EMULATE_DONE;
	u32 rt, rd, sel;
	unsigned long curr_pc;
	unsigned long val;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	if (inst.co_format.co) {
		switch (inst.co_format.func) {
		case wait_op:
			er = kvm_mips_emul_wait(vcpu);
			break;
		default:
			er = EMULATE_FAIL;
		}
	} else {
		rt = inst.c0r_format.rt;
		rd = inst.c0r_format.rd;
		sel = inst.c0r_format.sel;

		switch (inst.c0r_format.rs) {
		case dmfc_op:
		case mfc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				val = kvm_mips_read_count(vcpu);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				val = read_gc0_compare();
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				if (cpu_guest_has_rw_llb)
					val = read_gc0_lladdr() &
						MIPS_LLADDR_LLB;
				else
					val = 0;
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
				       ARRAY_SIZE(vcpu->arch.maar));
				val = vcpu->arch.maar[
					kvm_read_sw_gc0_maari(cop0)];
			} else if ((rd == MIPS_CP0_PRID &&
				    (sel == 0 ||	/* PRid */
				     sel == 2 ||	/* CDMMBase */
				     sel == 3)) ||	/* CMGCRBase */
				   (rd == MIPS_CP0_STATUS &&
				    (sel == 2 ||	/* SRSCtl */
				     sel == 3)) ||	/* SRSMap */
				   (rd == MIPS_CP0_CONFIG &&
				    (sel == 7)) ||	/* Config7 */
				   (rd == MIPS_CP0_LLADDR &&
				    (sel == 2) &&	/* MAARI */
				    cpu_guest_has_maar &&
				    !cpu_guest_has_dyn_maar) ||
				   (rd == MIPS_CP0_ERRCTL &&
				    (sel == 0))) {	/* ErrCtl */
				val = cop0->reg[rd][sel];
			} else {
				val = 0;
				er = EMULATE_FAIL;
			}

			if (er != EMULATE_FAIL) {
				/* Sign extend */
				if (inst.c0r_format.rs == mfc_op)
					val = (int)val;
				vcpu->arch.gprs[rt] = val;
			}

			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mfc_op) ?
					KVM_TRACE_MFC0 : KVM_TRACE_DMFC0,
				      KVM_TRACE_COP0(rd, sel), val);
			break;

		case dmtc_op:
		case mtc_op:
#ifdef CONFIG_KVM_MIPS_DEBUG_COP0_COUNTERS
			cop0->stat[rd][sel]++;
#endif
			val = vcpu->arch.gprs[rt];
			trace_kvm_hwr(vcpu, (inst.c0r_format.rs == mtc_op) ?
					KVM_TRACE_MTC0 : KVM_TRACE_DMTC0,
				      KVM_TRACE_COP0(rd, sel), val);

			if (rd == MIPS_CP0_COUNT &&
			    sel == 0) {			/* Count */
				kvm_vz_lose_htimer(vcpu);
				kvm_mips_write_count(vcpu, vcpu->arch.gprs[rt]);
			} else if (rd == MIPS_CP0_COMPARE &&
				   sel == 0) {		/* Compare */
				kvm_mips_write_compare(vcpu,
						       vcpu->arch.gprs[rt],
						       true);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 0) {		/* LLAddr */
				/*
				 * P5600 generates GPSI on guest MTC0 LLAddr.
				 * Only allow the guest to clear LLB.
				 */
				if (cpu_guest_has_rw_llb &&
				    !(val & MIPS_LLADDR_LLB))
					write_gc0_lladdr(0);
			} else if (rd == MIPS_CP0_LLADDR &&
				   sel == 1 &&		/* MAAR */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				val = mips_process_maar(inst.c0r_format.rs,
							val);

				/* MAARI must be in range */
				BUG_ON(kvm_read_sw_gc0_maari(cop0) >=
				       ARRAY_SIZE(vcpu->arch.maar));
				vcpu->arch.maar[kvm_read_sw_gc0_maari(cop0)] =
									val;
			} else if (rd == MIPS_CP0_LLADDR &&
				   (sel == 2) &&	/* MAARI */
				   cpu_guest_has_maar &&
				   !cpu_guest_has_dyn_maar) {
				kvm_write_maari(vcpu, val);
			} else if (rd == MIPS_CP0_ERRCTL &&
				   (sel == 0)) {	/* ErrCtl */
				/* ignore the written value */
			} else {
				er = EMULATE_FAIL;
			}
			break;

		default:
			er = EMULATE_FAIL;
			break;
		}
	}
	/* Rollback PC only if emulation was unsuccessful */
	if (er == EMULATE_FAIL) {
		kvm_err("[%#lx]%s: unsupported cop0 instruction 0x%08x\n",
			curr_pc, __func__, inst.word);

		vcpu->arch.pc = curr_pc;
	}

	return er;
}

static enum emulation_result kvm_vz_gpsi_cache(union mips_instruction inst,
					       u32 *opc, u32 cause,
					       struct kvm_run *run,
					       struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	u32 cache, op_inst, op, base;
	s16 offset;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	unsigned long va, curr_pc;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	base = inst.i_format.rs;
	op_inst = inst.i_format.rt;
	if (cpu_has_mips_r6)
		offset = inst.spec3_format.simmediate;
	else
		offset = inst.i_format.simmediate;
	cache = op_inst & CacheOp_Cache;
	op = op_inst & CacheOp_Op;

	va = arch->gprs[base] + offset;

	kvm_debug("CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		  cache, op, base, arch->gprs[base], offset);

	/* Secondary or tertiary cache ops are ignored */
	if (cache != Cache_I && cache != Cache_D)
		return EMULATE_DONE;

	switch (op_inst) {
	case Index_Invalidate_I:
		flush_icache_line_indexed(va);
		return EMULATE_DONE;
	case Index_Writeback_Inv_D:
		flush_dcache_line_indexed(va);
		return EMULATE_DONE;
	case Hit_Invalidate_I:
	case Hit_Invalidate_D:
	case Hit_Writeback_Inv_D:
		if (boot_cpu_type() == CPU_CAVIUM_OCTEON3) {
			/* We can just flush entire icache */
			local_flush_icache_range(0, 0);
			return EMULATE_DONE;
		}

		/* So far, other platforms support guest hit cache ops */
		break;
	default:
		break;
	}

	kvm_err("@ %#lx/%#lx CACHE (cache: %#x, op: %#x, base[%d]: %#lx, offset: %#x)\n",
		curr_pc, vcpu->arch.gprs[31], cache, op, base, arch->gprs[base],
		offset);
	/* Rollback PC */
	vcpu->arch.pc = curr_pc;

	return EMULATE_FAIL;
}

static enum emulation_result kvm_trap_vz_handle_gpsi(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	struct kvm_run *run = vcpu->run;
	union mips_instruction inst;
	int rd, rt, sel;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	switch (inst.r_format.opcode) {
	case cop0_op:
		er = kvm_vz_gpsi_cop0(inst, opc, cause, run, vcpu);
		break;
#ifndef CONFIG_CPU_MIPSR6
	case cache_op:
		trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
		er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
		break;
#endif
	case spec3_op:
		switch (inst.spec3_format.func) {
#ifdef CONFIG_CPU_MIPSR6
		case cache6_op:
			trace_kvm_exit(vcpu, KVM_TRACE_EXIT_CACHE);
			er = kvm_vz_gpsi_cache(inst, opc, cause, run, vcpu);
			break;
#endif
		case rdhwr_op:
			if (inst.r_format.rs || (inst.r_format.re >> 3))
				goto unknown;

			rd = inst.r_format.rd;
			rt = inst.r_format.rt;
			sel = inst.r_format.re & 0x7;

			switch (rd) {
			case MIPS_HWR_CC:	/* Read count register */
				arch->gprs[rt] =
					(long)(int)kvm_mips_read_count(vcpu);
				break;
			default:
				trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
					      KVM_TRACE_HWR(rd, sel), 0);
				goto unknown;
			}

			trace_kvm_hwr(vcpu, KVM_TRACE_RDHWR,
				      KVM_TRACE_HWR(rd, sel), arch->gprs[rt]);

			er = update_pc(vcpu, cause);
			break;
		default:
			goto unknown;
		}
		break;
unknown:

	default:
		kvm_err("GPSI exception not supported (%p/%#x)\n",
			opc, inst.word);
		kvm_arch_vcpu_dump_regs(vcpu);
		er = EMULATE_FAIL;
		break;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_gsfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	enum emulation_result er = EMULATE_DONE;
	struct kvm_vcpu_arch *arch = &vcpu->arch;
	union mips_instruction inst;
	int err;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/* complete MTC0 on behalf of guest and advance EPC */
	if (inst.c0r_format.opcode == cop0_op &&
	    inst.c0r_format.rs == mtc_op &&
	    inst.c0r_format.z == 0) {
		int rt = inst.c0r_format.rt;
		int rd = inst.c0r_format.rd;
		int sel = inst.c0r_format.sel;
		unsigned int val = arch->gprs[rt];
		unsigned int old_val, change;

		trace_kvm_hwr(vcpu, KVM_TRACE_MTC0, KVM_TRACE_COP0(rd, sel),
			      val);

		if ((rd == MIPS_CP0_STATUS) && (sel == 0)) {
			/* FR bit should read as zero if no FPU */
			if (!kvm_mips_guest_has_fpu(&vcpu->arch))
				val &= ~(ST0_CU1 | ST0_FR);

			/*
			 * Also don't allow FR to be set if host doesn't support
			 * it.
			 */
			if (!(boot_cpu_data.fpu_id & MIPS_FPIR_F64))
				val &= ~ST0_FR;

			old_val = read_gc0_status();
			change = val ^ old_val;

			if (change & ST0_FR) {
				/*
				 * FPU and Vector register state is made
				 * UNPREDICTABLE by a change of FR, so don't
				 * even bother saving it.
				 */
				kvm_drop_fpu(vcpu);
			}

			/*
			 * If MSA state is already live, it is undefined how it
			 * interacts with FR=0 FPU state, and we don't want to
			 * hit reserved instruction exceptions trying to save
			 * the MSA state later when CU=1 && FR=1, so play it
			 * safe and save it first.
			 */
			if (change & ST0_CU1 && !(val & ST0_FR) &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA)
				kvm_lose_fpu(vcpu);

			write_gc0_status(val);
		} else if ((rd == MIPS_CP0_CAUSE) && (sel == 0)) {
			u32 old_cause = read_gc0_cause();
			u32 change = old_cause ^ val;

			/* DC bit enabling/disabling timer? */
			if (change & CAUSEF_DC) {
				if (val & CAUSEF_DC) {
					kvm_vz_lose_htimer(vcpu);
					kvm_mips_count_disable_cause(vcpu);
				} else {
					kvm_mips_count_enable_cause(vcpu);
				}
			}

			/* Only certain bits are RW to the guest */
			change &= (CAUSEF_DC | CAUSEF_IV | CAUSEF_WP |
				   CAUSEF_IP0 | CAUSEF_IP1);

			/* WP can only be cleared */
			change &= ~CAUSEF_WP | old_cause;
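			/*
			 * Concretely: a guest write attempting WP 0 -> 1
			 * finds old_cause.WP clear, so the WP change bit is
			 * masked away and the write is ignored; once WP is
			 * set, the change bit survives and the guest may
			 * clear it.
			 */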

			write_gc0_cause(old_cause ^ change);
		} else if ((rd == MIPS_CP0_STATUS) && (sel == 1)) { /* IntCtl */
			write_gc0_intctl(val);
		} else if ((rd == MIPS_CP0_CONFIG) && (sel == 5)) {
			old_val = read_gc0_config5();
			change = val ^ old_val;
			/* Handle changes in FPU/MSA modes */
			preempt_disable();

			/*
			 * Propagate FRE changes immediately if the FPU
			 * context is already loaded.
			 */
			if (change & MIPS_CONF5_FRE &&
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)
				change_c0_config5(MIPS_CONF5_FRE, val);

			preempt_enable();

			val = old_val ^
				(change & kvm_vz_config5_guest_wrmask(vcpu));
			write_gc0_config5(val);
		} else {
			kvm_err("Handle GSFC, unsupported field change @ %p: %#x\n",
				opc, inst.word);
			er = EMULATE_FAIL;
		}

		if (er != EMULATE_FAIL)
			er = update_pc(vcpu, cause);
	} else {
		kvm_err("Handle GSFC, unrecognized instruction @ %p: %#x\n",
			opc, inst.word);
		er = EMULATE_FAIL;
	}

	return er;
}

static enum emulation_result kvm_trap_vz_handle_ghfc(u32 cause, u32 *opc,
						     struct kvm_vcpu *vcpu)
{
	/*
	 * Presumably this is due to MC (guest mode change), so let's trace
	 * some relevant info.
	 */
	trace_kvm_guest_mode_change(vcpu);

	return EMULATE_DONE;
}

static enum emulation_result kvm_trap_vz_handle_hc(u32 cause, u32 *opc,
						   struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	union mips_instruction inst;
	unsigned long curr_pc;
	int err;

	if (cause & CAUSEF_BD)
		opc += 1;
	err = kvm_get_badinstr(opc, vcpu, &inst.word);
	if (err)
		return EMULATE_FAIL;

	/*
	 * Update PC and hold onto current PC in case there is
	 * an error and we want to rollback the PC
	 */
	curr_pc = vcpu->arch.pc;
	er = update_pc(vcpu, cause);
	if (er == EMULATE_FAIL)
		return er;

	er = kvm_mips_emul_hypcall(vcpu, inst);
	if (er == EMULATE_FAIL)
		vcpu->arch.pc = curr_pc;

	return er;
}

static enum emulation_result kvm_trap_vz_no_handler_guest_exit(u32 gexccode,
							       u32 cause,
							       u32 *opc,
							       struct kvm_vcpu *vcpu)
{
	u32 inst;

	/*
	 * Fetch the instruction.
	 */
	if (cause & CAUSEF_BD)
		opc += 1;
	kvm_get_badinstr(opc, vcpu, &inst);

	kvm_err("Guest Exception Code: %d not yet handled @ PC: %p, inst: 0x%08x Status: %#x\n",
		gexccode, opc, inst, read_gc0_status());

	return EMULATE_FAIL;
}

static int kvm_trap_vz_handle_guest_exit(struct kvm_vcpu *vcpu)
{
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	u32 gexccode = (vcpu->arch.host_cp0_guestctl0 &
			MIPS_GCTL0_GEXC) >> MIPS_GCTL0_GEXC_SHIFT;
	int ret = RESUME_GUEST;

	trace_kvm_exit(vcpu, KVM_TRACE_EXIT_GEXCCODE_BASE + gexccode);
	switch (gexccode) {
	case MIPS_GCTL0_GEXC_GPSI:
		++vcpu->stat.vz_gpsi_exits;
		er = kvm_trap_vz_handle_gpsi(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GSFC:
		++vcpu->stat.vz_gsfc_exits;
		er = kvm_trap_vz_handle_gsfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_HC:
		++vcpu->stat.vz_hc_exits;
		er = kvm_trap_vz_handle_hc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GRR:
		++vcpu->stat.vz_grr_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GVA:
		++vcpu->stat.vz_gva_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	case MIPS_GCTL0_GEXC_GHFC:
		++vcpu->stat.vz_ghfc_exits;
		er = kvm_trap_vz_handle_ghfc(cause, opc, vcpu);
		break;
	case MIPS_GCTL0_GEXC_GPA:
		++vcpu->stat.vz_gpa_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	default:
		++vcpu->stat.vz_resvd_exits;
		er = kvm_trap_vz_no_handler_guest_exit(gexccode, cause, opc,
						       vcpu);
		break;
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_HYPERCALL) {
		ret = kvm_mips_handle_hypcall(vcpu);
	} else {
		vcpu->run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_cop_unusable() - Guest used unusable coprocessor.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use a coprocessor which hasn't been
 * allowed by the root context.
 */
static int kvm_trap_vz_handle_cop_unusable(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_FAIL;
	int ret = RESUME_GUEST;

	if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
		/*
		 * If guest FPU not present, the FPU operation should have been
		 * treated as a reserved instruction!
		 * If FPU already in use, we shouldn't get this at all.
		 */
		if (WARN_ON(!kvm_mips_guest_has_fpu(&vcpu->arch) ||
			    vcpu->arch.aux_inuse & KVM_MIPS_AUX_FPU)) {
			preempt_enable();
			return EMULATE_FAIL;
		}

		kvm_own_fpu(vcpu);
		er = EMULATE_DONE;
	}
	/* other coprocessors not handled */

	switch (er) {
	case EMULATE_DONE:
		ret = RESUME_GUEST;
		break;

	case EMULATE_FAIL:
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
		break;

	default:
		BUG();
	}
	return ret;
}

/**
 * kvm_trap_vz_handle_msa_disabled() - Guest used MSA while disabled in root.
 * @vcpu:	Virtual CPU context.
 *
 * Handle when the guest attempts to use MSA when it is disabled in the root
 * context.
 */
static int kvm_trap_vz_handle_msa_disabled(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;

	/*
	 * If MSA not present or not exposed to guest or FR=0, the MSA operation
	 * should have been treated as a reserved instruction!
	 * Same if CU1=1, FR=0.
	 * If MSA already in use, we shouldn't get this at all.
	 */
	if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
	    (read_gc0_status() & (ST0_CU1 | ST0_FR)) == ST0_CU1 ||
	    !(read_gc0_config5() & MIPS_CONF5_MSAEN) ||
	    vcpu->arch.aux_inuse & KVM_MIPS_AUX_MSA) {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return RESUME_HOST;
	}

	kvm_own_msa(vcpu);

	return RESUME_GUEST;
}

static int kvm_trap_vz_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err, ret = RESUME_GUEST;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, false)) {
		/* A code fetch fault doesn't count as an MMIO */
		if (kvm_is_ifetch_fault(&vcpu->arch)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_load(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Load from MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static int kvm_trap_vz_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 *opc = (u32 *) vcpu->arch.pc;
	u32 cause = vcpu->arch.host_cp0_cause;
	ulong badvaddr = vcpu->arch.host_cp0_badvaddr;
	union mips_instruction inst;
	enum emulation_result er = EMULATE_DONE;
	int err;
	int ret = RESUME_GUEST;

	/* Just try the access again if we couldn't do the translation */
	if (kvm_vz_badvaddr_to_gpa(vcpu, badvaddr, &badvaddr))
		return RESUME_GUEST;
	vcpu->arch.host_cp0_badvaddr = badvaddr;

	if (kvm_mips_handle_vz_root_tlb_fault(badvaddr, vcpu, true)) {
		/* Fetch the instruction */
		if (cause & CAUSEF_BD)
			opc += 1;
		err = kvm_get_badinstr(opc, vcpu, &inst.word);
		if (err) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/* Treat as MMIO */
		er = kvm_mips_emulate_store(inst, cause, run, vcpu);
		if (er == EMULATE_FAIL) {
			kvm_err("Guest Emulate Store to MMIO space failed: PC: %p, BadVaddr: %#lx\n",
				opc, badvaddr);
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		}
	}

	if (er == EMULATE_DONE) {
		ret = RESUME_GUEST;
	} else if (er == EMULATE_DO_MMIO) {
		run->exit_reason = KVM_EXIT_MMIO;
		ret = RESUME_HOST;
	} else {
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}

static u64 kvm_vz_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_PAGEGRAIN,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
#ifdef CONFIG_64BIT
	KVM_REG_MIPS_CP0_XCONTEXT,
#endif
	KVM_REG_MIPS_CP0_ERROREPC,

	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};

static u64 kvm_vz_get_one_regs_contextconfig[] = {
| 1666 | KVM_REG_MIPS_CP0_CONTEXTCONFIG, |
| 1667 | #ifdef CONFIG_64BIT |
| 1668 | KVM_REG_MIPS_CP0_XCONTEXTCONFIG, |
| 1669 | #endif |
| 1670 | }; |
| 1671 | |
James Hogan | 4b7de02 | 2017-03-14 10:15:35 +0000 | [diff] [blame] | 1672 | static u64 kvm_vz_get_one_regs_segments[] = { |
| 1673 | KVM_REG_MIPS_CP0_SEGCTL0, |
| 1674 | KVM_REG_MIPS_CP0_SEGCTL1, |
| 1675 | KVM_REG_MIPS_CP0_SEGCTL2, |
| 1676 | }; |
| 1677 | |
James Hogan | 5a2f352 | 2017-03-14 10:15:36 +0000 | [diff] [blame] | 1678 | static u64 kvm_vz_get_one_regs_htw[] = { |
| 1679 | KVM_REG_MIPS_CP0_PWBASE, |
| 1680 | KVM_REG_MIPS_CP0_PWFIELD, |
| 1681 | KVM_REG_MIPS_CP0_PWSIZE, |
| 1682 | KVM_REG_MIPS_CP0_PWCTL, |
| 1683 | }; |
| 1684 | |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 1685 | static u64 kvm_vz_get_one_regs_kscratch[] = { |
| 1686 | KVM_REG_MIPS_CP0_KSCRATCH1, |
| 1687 | KVM_REG_MIPS_CP0_KSCRATCH2, |
| 1688 | KVM_REG_MIPS_CP0_KSCRATCH3, |
| 1689 | KVM_REG_MIPS_CP0_KSCRATCH4, |
| 1690 | KVM_REG_MIPS_CP0_KSCRATCH5, |
| 1691 | KVM_REG_MIPS_CP0_KSCRATCH6, |
| 1692 | }; |
| 1693 | |
| 1694 | static unsigned long kvm_vz_num_regs(struct kvm_vcpu *vcpu) |
| 1695 | { |
| 1696 | unsigned long ret; |
| 1697 | |
| 1698 | ret = ARRAY_SIZE(kvm_vz_get_one_regs); |
| 1699 | if (cpu_guest_has_userlocal) |
| 1700 | ++ret; |
James Hogan | edc8926 | 2017-03-14 10:15:33 +0000 | [diff] [blame] | 1701 | if (cpu_guest_has_badinstr) |
| 1702 | ++ret; |
| 1703 | if (cpu_guest_has_badinstrp) |
| 1704 | ++ret; |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | [diff] [blame] | 1705 | if (cpu_guest_has_contextconfig) |
| 1706 | ret += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig); |
James Hogan | 4b7de02 | 2017-03-14 10:15:35 +0000 | [diff] [blame] | 1707 | if (cpu_guest_has_segments) |
| 1708 | ret += ARRAY_SIZE(kvm_vz_get_one_regs_segments); |
James Hogan | 5a2f352 | 2017-03-14 10:15:36 +0000 | [diff] [blame] | 1709 | if (cpu_guest_has_htw) |
| 1710 | ret += ARRAY_SIZE(kvm_vz_get_one_regs_htw); |
James Hogan | d42a008 | 2017-03-14 10:15:38 +0000 | [diff] [blame] | 1711 | if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) |
| 1712 | ret += 1 + ARRAY_SIZE(vcpu->arch.maar); |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 1713 | ret += __arch_hweight8(cpu_data[0].guest.kscratch_mask); |
| 1714 | |
| 1715 | return ret; |
| 1716 | } |
| 1717 | |
| 1718 | static int kvm_vz_copy_reg_indices(struct kvm_vcpu *vcpu, u64 __user *indices) |
| 1719 | { |
| 1720 | u64 index; |
| 1721 | unsigned int i; |
| 1722 | |
| 1723 | if (copy_to_user(indices, kvm_vz_get_one_regs, |
| 1724 | sizeof(kvm_vz_get_one_regs))) |
| 1725 | return -EFAULT; |
| 1726 | indices += ARRAY_SIZE(kvm_vz_get_one_regs); |
| 1727 | |
| 1728 | if (cpu_guest_has_userlocal) { |
| 1729 | index = KVM_REG_MIPS_CP0_USERLOCAL; |
| 1730 | if (copy_to_user(indices, &index, sizeof(index))) |
| 1731 | return -EFAULT; |
| 1732 | ++indices; |
| 1733 | } |
James Hogan | edc8926 | 2017-03-14 10:15:33 +0000 | [diff] [blame] | 1734 | if (cpu_guest_has_badinstr) { |
| 1735 | index = KVM_REG_MIPS_CP0_BADINSTR; |
| 1736 | if (copy_to_user(indices, &index, sizeof(index))) |
| 1737 | return -EFAULT; |
| 1738 | ++indices; |
| 1739 | } |
| 1740 | if (cpu_guest_has_badinstrp) { |
| 1741 | index = KVM_REG_MIPS_CP0_BADINSTRP; |
| 1742 | if (copy_to_user(indices, &index, sizeof(index))) |
| 1743 | return -EFAULT; |
| 1744 | ++indices; |
| 1745 | } |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | [diff] [blame] | 1746 | if (cpu_guest_has_contextconfig) { |
| 1747 | if (copy_to_user(indices, kvm_vz_get_one_regs_contextconfig, |
| 1748 | sizeof(kvm_vz_get_one_regs_contextconfig))) |
| 1749 | return -EFAULT; |
| 1750 | indices += ARRAY_SIZE(kvm_vz_get_one_regs_contextconfig); |
| 1751 | } |
James Hogan | 4b7de02 | 2017-03-14 10:15:35 +0000 | [diff] [blame] | 1752 | if (cpu_guest_has_segments) { |
| 1753 | if (copy_to_user(indices, kvm_vz_get_one_regs_segments, |
| 1754 | sizeof(kvm_vz_get_one_regs_segments))) |
| 1755 | return -EFAULT; |
| 1756 | indices += ARRAY_SIZE(kvm_vz_get_one_regs_segments); |
| 1757 | } |
James Hogan | 5a2f352 | 2017-03-14 10:15:36 +0000 | [diff] [blame] | 1758 | if (cpu_guest_has_htw) { |
| 1759 | if (copy_to_user(indices, kvm_vz_get_one_regs_htw, |
| 1760 | sizeof(kvm_vz_get_one_regs_htw))) |
| 1761 | return -EFAULT; |
| 1762 | indices += ARRAY_SIZE(kvm_vz_get_one_regs_htw); |
| 1763 | } |
James Hogan | d42a008 | 2017-03-14 10:15:38 +0000 | [diff] [blame] | 1764 | if (cpu_guest_has_maar && !cpu_guest_has_dyn_maar) { |
| 1765 | for (i = 0; i < ARRAY_SIZE(vcpu->arch.maar); ++i) { |
| 1766 | index = KVM_REG_MIPS_CP0_MAAR(i); |
| 1767 | if (copy_to_user(indices, &index, sizeof(index))) |
| 1768 | return -EFAULT; |
| 1769 | ++indices; |
| 1770 | } |
| 1771 | |
| 1772 | index = KVM_REG_MIPS_CP0_MAARI; |
| 1773 | if (copy_to_user(indices, &index, sizeof(index))) |
| 1774 | return -EFAULT; |
| 1775 | ++indices; |
| 1776 | } |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 1777 | for (i = 0; i < 6; ++i) { |
| 1778 | if (!cpu_guest_has_kscr(i + 2)) |
| 1779 | continue; |
| 1780 | |
| 1781 | if (copy_to_user(indices, &kvm_vz_get_one_regs_kscratch[i], |
| 1782 | sizeof(kvm_vz_get_one_regs_kscratch[i]))) |
| 1783 | return -EFAULT; |
| 1784 | ++indices; |
| 1785 | } |
| 1786 | |
| 1787 | return 0; |
| 1788 | } |
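| | /* |
| | * kvm_vz_num_regs() and kvm_vz_copy_reg_indices() back the generic |
| | * KVM_GET_REG_LIST vcpu ioctl. A hedged userspace sketch of how the |
| | * index list is consumed (standalone userspace code, kept in a comment |
| | * since it does not belong in this file; vcpu_fd is assumed to be an |
| | * open vcpu file descriptor): |
| | * |
| | * #include <linux/kvm.h> |
| | * #include <stdlib.h> |
| | * #include <sys/ioctl.h> |
| | * |
| | * static struct kvm_reg_list *get_reg_list(int vcpu_fd) |
| | * { |
| | * struct kvm_reg_list probe = { .n = 0 }, *list; |
| | * |
| | * // the first call fails with E2BIG but reports the count |
| | * ioctl(vcpu_fd, KVM_GET_REG_LIST, &probe); |
| | * list = malloc(sizeof(*list) + probe.n * sizeof(__u64)); |
| | * if (!list) |
| | * return NULL; |
| | * list->n = probe.n; |
| | * // the second call fills list->reg[0..n-1] with register ids |
| | * if (ioctl(vcpu_fd, KVM_GET_REG_LIST, list) < 0) { |
| | * free(list); |
| | * return NULL; |
| | * } |
| | * return list; |
| | * } |
| | */ |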
| 1789 | |
| 1790 | static inline s64 entrylo_kvm_to_user(unsigned long v) |
| 1791 | { |
| 1792 | s64 mask, ret = v; |
| 1793 | |
| 1794 | if (BITS_PER_LONG == 32) { |
| 1795 | /* |
| 1796 | * KVM API exposes 64-bit version of the register, so move the |
| 1797 | * RI/XI bits up into place. |
| 1798 | */ |
| 1799 | mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI; |
| 1800 | ret &= ~mask; |
| 1801 | ret |= ((s64)v & mask) << 32; |
| 1802 | } |
| 1803 | return ret; |
| 1804 | } |
| 1805 | |
| 1806 | static inline unsigned long entrylo_user_to_kvm(s64 v) |
| 1807 | { |
| 1808 | unsigned long mask, ret = v; |
| 1809 | |
| 1810 | if (BITS_PER_LONG == 32) { |
| 1811 | /* |
| 1812 | * KVM API exposes 64-bit version of the register, so move the |
| 1813 | * RI/XI bits down into place. |
| 1814 | */ |
| 1815 | mask = MIPS_ENTRYLO_RI | MIPS_ENTRYLO_XI; |
| 1816 | ret &= ~mask; |
| 1817 | ret |= (v >> 32) & mask; |
| 1818 | } |
| 1819 | return ret; |
| 1820 | } |
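| | /* |
| | * A worked example of the conversion pair above on a 32-bit kernel, |
| | * assuming the usual EntryLo layout with RI in bit 31 and XI in bit 30 |
| | * (bits 63 and 62 of the 64-bit user view): |
| | * |
| | * kernel EntryLo: 0xc0001234 (RI=1, XI=1, PFN/C/D/V/G below) |
| | * entrylo_kvm_to_user(): clear bits 31:30, OR them back in at 63:62, |
| | * giving 0xc0000000_00001234 |
| | * entrylo_user_to_kvm(): the exact inverse, recovering 0xc0001234 |
| | */ |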
| 1821 | |
| 1822 | static int kvm_vz_get_one_reg(struct kvm_vcpu *vcpu, |
| 1823 | const struct kvm_one_reg *reg, |
| 1824 | s64 *v) |
| 1825 | { |
| 1826 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
| 1827 | unsigned int idx; |
| 1828 | |
| 1829 | switch (reg->id) { |
| 1830 | case KVM_REG_MIPS_CP0_INDEX: |
| 1831 | *v = (long)read_gc0_index(); |
| 1832 | break; |
| 1833 | case KVM_REG_MIPS_CP0_ENTRYLO0: |
| 1834 | *v = entrylo_kvm_to_user(read_gc0_entrylo0()); |
| 1835 | break; |
| 1836 | case KVM_REG_MIPS_CP0_ENTRYLO1: |
| 1837 | *v = entrylo_kvm_to_user(read_gc0_entrylo1()); |
| 1838 | break; |
| 1839 | case KVM_REG_MIPS_CP0_CONTEXT: |
| 1840 | *v = (long)read_gc0_context(); |
| 1841 | break; |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | [diff] [blame] | 1842 | case KVM_REG_MIPS_CP0_CONTEXTCONFIG: |
| 1843 | if (!cpu_guest_has_contextconfig) |
| 1844 | return -EINVAL; |
| 1845 | *v = read_gc0_contextconfig(); |
| 1846 | break; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 1847 | case KVM_REG_MIPS_CP0_USERLOCAL: |
| 1848 | if (!cpu_guest_has_userlocal) |
| 1849 | return -EINVAL; |
| 1850 | *v = read_gc0_userlocal(); |
| 1851 | break; |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | [diff] [blame] | 1852 | #ifdef CONFIG_64BIT |
| 1853 | case KVM_REG_MIPS_CP0_XCONTEXTCONFIG: |
| 1854 | if (!cpu_guest_has_contextconfig) |
| 1855 | return -EINVAL; |
| 1856 | *v = read_gc0_xcontextconfig(); |
| 1857 | break; |
| 1858 | #endif |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 1859 | case KVM_REG_MIPS_CP0_PAGEMASK: |
| 1860 | *v = (long)read_gc0_pagemask(); |
| 1861 | break; |
| 1862 | case KVM_REG_MIPS_CP0_PAGEGRAIN: |
| 1863 | *v = (long)read_gc0_pagegrain(); |
| 1864 | break; |
James Hogan | 4b7de02 | 2017-03-14 10:15:35 +0000 | [diff] [blame] | 1865 | case KVM_REG_MIPS_CP0_SEGCTL0: |
| 1866 | if (!cpu_guest_has_segments) |
| 1867 | return -EINVAL; |
| 1868 | *v = read_gc0_segctl0(); |
| 1869 | break; |
| 1870 | case KVM_REG_MIPS_CP0_SEGCTL1: |
| 1871 | if (!cpu_guest_has_segments) |
| 1872 | return -EINVAL; |
| 1873 | *v = read_gc0_segctl1(); |
| 1874 | break; |
| 1875 | case KVM_REG_MIPS_CP0_SEGCTL2: |
| 1876 | if (!cpu_guest_has_segments) |
| 1877 | return -EINVAL; |
| 1878 | *v = read_gc0_segctl2(); |
| 1879 | break; |
James Hogan | 5a2f352 | 2017-03-14 10:15:36 +0000 | [diff] [blame] | 1880 | case KVM_REG_MIPS_CP0_PWBASE: |
| 1881 | if (!cpu_guest_has_htw) |
| 1882 | return -EINVAL; |
| 1883 | *v = read_gc0_pwbase(); |
| 1884 | break; |
| 1885 | case KVM_REG_MIPS_CP0_PWFIELD: |
| 1886 | if (!cpu_guest_has_htw) |
| 1887 | return -EINVAL; |
| 1888 | *v = read_gc0_pwfield(); |
| 1889 | break; |
| 1890 | case KVM_REG_MIPS_CP0_PWSIZE: |
| 1891 | if (!cpu_guest_has_htw) |
| 1892 | return -EINVAL; |
| 1893 | *v = read_gc0_pwsize(); |
| 1894 | break; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 1895 | case KVM_REG_MIPS_CP0_WIRED: |
| 1896 | *v = (long)read_gc0_wired(); |
| 1897 | break; |
James Hogan | 5a2f352 | 2017-03-14 10:15:36 +0000 | [diff] [blame] | 1898 | case KVM_REG_MIPS_CP0_PWCTL: |
| 1899 | if (!cpu_guest_has_htw) |
| 1900 | return -EINVAL; |
| 1901 | *v = read_gc0_pwctl(); |
| 1902 | break; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 1903 | case KVM_REG_MIPS_CP0_HWRENA: |
| 1904 | *v = (long)read_gc0_hwrena(); |
| 1905 | break; |
| 1906 | case KVM_REG_MIPS_CP0_BADVADDR: |
| 1907 | *v = (long)read_gc0_badvaddr(); |
| 1908 | break; |
James Hogan | edc8926 | 2017-03-14 10:15:33 +0000 | [diff] [blame] | 1909 | case KVM_REG_MIPS_CP0_BADINSTR: |
| 1910 | if (!cpu_guest_has_badinstr) |
| 1911 | return -EINVAL; |
| 1912 | *v = read_gc0_badinstr(); |
| 1913 | break; |
| 1914 | case KVM_REG_MIPS_CP0_BADINSTRP: |
| 1915 | if (!cpu_guest_has_badinstrp) |
| 1916 | return -EINVAL; |
| 1917 | *v = read_gc0_badinstrp(); |
| 1918 | break; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 1919 | case KVM_REG_MIPS_CP0_COUNT: |
| 1920 | *v = kvm_mips_read_count(vcpu); |
| 1921 | break; |
| 1922 | case KVM_REG_MIPS_CP0_ENTRYHI: |
| 1923 | *v = (long)read_gc0_entryhi(); |
| 1924 | break; |
| 1925 | case KVM_REG_MIPS_CP0_COMPARE: |
| 1926 | *v = (long)read_gc0_compare(); |
| 1927 | break; |
| 1928 | case KVM_REG_MIPS_CP0_STATUS: |
| 1929 | *v = (long)read_gc0_status(); |
| 1930 | break; |
| 1931 | case KVM_REG_MIPS_CP0_INTCTL: |
| 1932 | *v = read_gc0_intctl(); |
| 1933 | break; |
| 1934 | case KVM_REG_MIPS_CP0_CAUSE: |
| 1935 | *v = (long)read_gc0_cause(); |
| 1936 | break; |
| 1937 | case KVM_REG_MIPS_CP0_EPC: |
| 1938 | *v = (long)read_gc0_epc(); |
| 1939 | break; |
| 1940 | case KVM_REG_MIPS_CP0_PRID: |
James Hogan | 1f48f9b | 2017-03-14 10:25:50 +0000 | [diff] [blame] | 1941 | switch (boot_cpu_type()) { |
| 1942 | case CPU_CAVIUM_OCTEON3: |
| 1943 | /* Octeon III has a read-only guest.PRid */ |
| 1944 | *v = read_gc0_prid(); |
| 1945 | break; |
| 1946 | default: |
| 1947 | *v = (long)kvm_read_c0_guest_prid(cop0); |
| 1948 | break; |
| 1949 | } |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 1950 | break; |
| 1951 | case KVM_REG_MIPS_CP0_EBASE: |
| 1952 | *v = kvm_vz_read_gc0_ebase(); |
| 1953 | break; |
| 1954 | case KVM_REG_MIPS_CP0_CONFIG: |
| 1955 | *v = read_gc0_config(); |
| 1956 | break; |
| 1957 | case KVM_REG_MIPS_CP0_CONFIG1: |
| 1958 | if (!cpu_guest_has_conf1) |
| 1959 | return -EINVAL; |
| 1960 | *v = read_gc0_config1(); |
| 1961 | break; |
| 1962 | case KVM_REG_MIPS_CP0_CONFIG2: |
| 1963 | if (!cpu_guest_has_conf2) |
| 1964 | return -EINVAL; |
| 1965 | *v = read_gc0_config2(); |
| 1966 | break; |
| 1967 | case KVM_REG_MIPS_CP0_CONFIG3: |
| 1968 | if (!cpu_guest_has_conf3) |
| 1969 | return -EINVAL; |
| 1970 | *v = read_gc0_config3(); |
| 1971 | break; |
| 1972 | case KVM_REG_MIPS_CP0_CONFIG4: |
| 1973 | if (!cpu_guest_has_conf4) |
| 1974 | return -EINVAL; |
| 1975 | *v = read_gc0_config4(); |
| 1976 | break; |
| 1977 | case KVM_REG_MIPS_CP0_CONFIG5: |
| 1978 | if (!cpu_guest_has_conf5) |
| 1979 | return -EINVAL; |
| 1980 | *v = read_gc0_config5(); |
| 1981 | break; |
James Hogan | d42a008 | 2017-03-14 10:15:38 +0000 | [diff] [blame] | 1982 | case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f): |
| 1983 | if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) |
| 1984 | return -EINVAL; |
| 1985 | idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); |
| 1986 | if (idx >= ARRAY_SIZE(vcpu->arch.maar)) |
| 1987 | return -EINVAL; |
| 1988 | *v = vcpu->arch.maar[idx]; |
| 1989 | break; |
| 1990 | case KVM_REG_MIPS_CP0_MAARI: |
| 1991 | if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) |
| 1992 | return -EINVAL; |
| 1993 | *v = kvm_read_sw_gc0_maari(vcpu->arch.cop0); |
| 1994 | break; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 1995 | #ifdef CONFIG_64BIT |
| 1996 | case KVM_REG_MIPS_CP0_XCONTEXT: |
| 1997 | *v = read_gc0_xcontext(); |
| 1998 | break; |
| 1999 | #endif |
| 2000 | case KVM_REG_MIPS_CP0_ERROREPC: |
| 2001 | *v = (long)read_gc0_errorepc(); |
| 2002 | break; |
| 2003 | case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6: |
| 2004 | idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; |
| 2005 | if (!cpu_guest_has_kscr(idx)) |
| 2006 | return -EINVAL; |
| 2007 | switch (idx) { |
| 2008 | case 2: |
| 2009 | *v = (long)read_gc0_kscratch1(); |
| 2010 | break; |
| 2011 | case 3: |
| 2012 | *v = (long)read_gc0_kscratch2(); |
| 2013 | break; |
| 2014 | case 4: |
| 2015 | *v = (long)read_gc0_kscratch3(); |
| 2016 | break; |
| 2017 | case 5: |
| 2018 | *v = (long)read_gc0_kscratch4(); |
| 2019 | break; |
| 2020 | case 6: |
| 2021 | *v = (long)read_gc0_kscratch5(); |
| 2022 | break; |
| 2023 | case 7: |
| 2024 | *v = (long)read_gc0_kscratch6(); |
| 2025 | break; |
| 2026 | } |
| 2027 | break; |
| 2028 | case KVM_REG_MIPS_COUNT_CTL: |
| 2029 | *v = vcpu->arch.count_ctl; |
| 2030 | break; |
| 2031 | case KVM_REG_MIPS_COUNT_RESUME: |
| 2032 | *v = ktime_to_ns(vcpu->arch.count_resume); |
| 2033 | break; |
| 2034 | case KVM_REG_MIPS_COUNT_HZ: |
| 2035 | *v = vcpu->arch.count_hz; |
| 2036 | break; |
| 2037 | default: |
| 2038 | return -EINVAL; |
| 2039 | } |
| 2040 | return 0; |
| 2041 | } |
| 2042 | |
| 2043 | static int kvm_vz_set_one_reg(struct kvm_vcpu *vcpu, |
| 2044 | const struct kvm_one_reg *reg, |
| 2045 | s64 v) |
| 2046 | { |
| 2047 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
| 2048 | unsigned int idx; |
| 2049 | int ret = 0; |
| 2050 | unsigned int cur, change; |
| 2051 | |
| 2052 | switch (reg->id) { |
| 2053 | case KVM_REG_MIPS_CP0_INDEX: |
| 2054 | write_gc0_index(v); |
| 2055 | break; |
| 2056 | case KVM_REG_MIPS_CP0_ENTRYLO0: |
| 2057 | write_gc0_entrylo0(entrylo_user_to_kvm(v)); |
| 2058 | break; |
| 2059 | case KVM_REG_MIPS_CP0_ENTRYLO1: |
| 2060 | write_gc0_entrylo1(entrylo_user_to_kvm(v)); |
| 2061 | break; |
| 2062 | case KVM_REG_MIPS_CP0_CONTEXT: |
| 2063 | write_gc0_context(v); |
| 2064 | break; |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | [diff] [blame] | 2065 | case KVM_REG_MIPS_CP0_CONTEXTCONFIG: |
| 2066 | if (!cpu_guest_has_contextconfig) |
| 2067 | return -EINVAL; |
| 2068 | write_gc0_contextconfig(v); |
| 2069 | break; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2070 | case KVM_REG_MIPS_CP0_USERLOCAL: |
| 2071 | if (!cpu_guest_has_userlocal) |
| 2072 | return -EINVAL; |
| 2073 | write_gc0_userlocal(v); |
| 2074 | break; |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | [diff] [blame] | 2075 | #ifdef CONFIG_64BIT |
| 2076 | case KVM_REG_MIPS_CP0_XCONTEXTCONFIG: |
| 2077 | if (!cpu_guest_has_contextconfig) |
| 2078 | return -EINVAL; |
| 2079 | write_gc0_xcontextconfig(v); |
| 2080 | break; |
| 2081 | #endif |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2082 | case KVM_REG_MIPS_CP0_PAGEMASK: |
| 2083 | write_gc0_pagemask(v); |
| 2084 | break; |
| 2085 | case KVM_REG_MIPS_CP0_PAGEGRAIN: |
| 2086 | write_gc0_pagegrain(v); |
| 2087 | break; |
James Hogan | 4b7de02 | 2017-03-14 10:15:35 +0000 | [diff] [blame] | 2088 | case KVM_REG_MIPS_CP0_SEGCTL0: |
| 2089 | if (!cpu_guest_has_segments) |
| 2090 | return -EINVAL; |
| 2091 | write_gc0_segctl0(v); |
| 2092 | break; |
| 2093 | case KVM_REG_MIPS_CP0_SEGCTL1: |
| 2094 | if (!cpu_guest_has_segments) |
| 2095 | return -EINVAL; |
| 2096 | write_gc0_segctl1(v); |
| 2097 | break; |
| 2098 | case KVM_REG_MIPS_CP0_SEGCTL2: |
| 2099 | if (!cpu_guest_has_segments) |
| 2100 | return -EINVAL; |
| 2101 | write_gc0_segctl2(v); |
| 2102 | break; |
James Hogan | 5a2f352 | 2017-03-14 10:15:36 +0000 | [diff] [blame] | 2103 | case KVM_REG_MIPS_CP0_PWBASE: |
| 2104 | if (!cpu_guest_has_htw) |
| 2105 | return -EINVAL; |
| 2106 | write_gc0_pwbase(v); |
| 2107 | break; |
| 2108 | case KVM_REG_MIPS_CP0_PWFIELD: |
| 2109 | if (!cpu_guest_has_htw) |
| 2110 | return -EINVAL; |
| 2111 | write_gc0_pwfield(v); |
| 2112 | break; |
| 2113 | case KVM_REG_MIPS_CP0_PWSIZE: |
| 2114 | if (!cpu_guest_has_htw) |
| 2115 | return -EINVAL; |
| 2116 | write_gc0_pwsize(v); |
| 2117 | break; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2118 | case KVM_REG_MIPS_CP0_WIRED: |
| 2119 | change_gc0_wired(MIPSR6_WIRED_WIRED, v); |
| 2120 | break; |
James Hogan | 5a2f352 | 2017-03-14 10:15:36 +0000 | [diff] [blame] | 2121 | case KVM_REG_MIPS_CP0_PWCTL: |
| 2122 | if (!cpu_guest_has_htw) |
| 2123 | return -EINVAL; |
| 2124 | write_gc0_pwctl(v); |
| 2125 | break; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2126 | case KVM_REG_MIPS_CP0_HWRENA: |
| 2127 | write_gc0_hwrena(v); |
| 2128 | break; |
| 2129 | case KVM_REG_MIPS_CP0_BADVADDR: |
| 2130 | write_gc0_badvaddr(v); |
| 2131 | break; |
James Hogan | edc8926 | 2017-03-14 10:15:33 +0000 | [diff] [blame] | 2132 | case KVM_REG_MIPS_CP0_BADINSTR: |
| 2133 | if (!cpu_guest_has_badinstr) |
| 2134 | return -EINVAL; |
| 2135 | write_gc0_badinstr(v); |
| 2136 | break; |
| 2137 | case KVM_REG_MIPS_CP0_BADINSTRP: |
| 2138 | if (!cpu_guest_has_badinstrp) |
| 2139 | return -EINVAL; |
| 2140 | write_gc0_badinstrp(v); |
| 2141 | break; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2142 | case KVM_REG_MIPS_CP0_COUNT: |
| 2143 | kvm_mips_write_count(vcpu, v); |
| 2144 | break; |
| 2145 | case KVM_REG_MIPS_CP0_ENTRYHI: |
| 2146 | write_gc0_entryhi(v); |
| 2147 | break; |
| 2148 | case KVM_REG_MIPS_CP0_COMPARE: |
| 2149 | kvm_mips_write_compare(vcpu, v, false); |
| 2150 | break; |
| 2151 | case KVM_REG_MIPS_CP0_STATUS: |
| 2152 | write_gc0_status(v); |
| 2153 | break; |
| 2154 | case KVM_REG_MIPS_CP0_INTCTL: |
| 2155 | write_gc0_intctl(v); |
| 2156 | break; |
| 2157 | case KVM_REG_MIPS_CP0_CAUSE: |
| 2158 | /* |
| 2159 | * If the timer is stopped or started (DC bit) it must look |
| 2160 | * atomic with changes to the timer interrupt pending bit (TI). |
| 2161 | * A timer interrupt should not happen in between. |
| 2162 | */ |
| 2163 | if ((read_gc0_cause() ^ v) & CAUSEF_DC) { |
| 2164 | if (v & CAUSEF_DC) { |
| 2165 | /* disable timer first */ |
| 2166 | kvm_mips_count_disable_cause(vcpu); |
| 2167 | change_gc0_cause((u32)~CAUSEF_DC, v); |
| 2168 | } else { |
| 2169 | /* enable timer last */ |
| 2170 | change_gc0_cause((u32)~CAUSEF_DC, v); |
| 2171 | kvm_mips_count_enable_cause(vcpu); |
| 2172 | } |
| 2173 | } else { |
| 2174 | write_gc0_cause(v); |
| 2175 | } |
| 2176 | break; |
| 2177 | case KVM_REG_MIPS_CP0_EPC: |
| 2178 | write_gc0_epc(v); |
| 2179 | break; |
| 2180 | case KVM_REG_MIPS_CP0_PRID: |
James Hogan | 1f48f9b | 2017-03-14 10:25:50 +0000 | [diff] [blame] | 2181 | switch (boot_cpu_type()) { |
| 2182 | case CPU_CAVIUM_OCTEON3: |
| 2183 | /* Octeon III has a guest.PRid, but it's read-only */ |
| 2184 | break; |
| 2185 | default: |
| 2186 | kvm_write_c0_guest_prid(cop0, v); |
| 2187 | break; |
| 2188 | } |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2189 | break; |
| 2190 | case KVM_REG_MIPS_CP0_EBASE: |
| 2191 | kvm_vz_write_gc0_ebase(v); |
| 2192 | break; |
| 2193 | case KVM_REG_MIPS_CP0_CONFIG: |
| 2194 | cur = read_gc0_config(); |
| 2195 | change = (cur ^ v) & kvm_vz_config_user_wrmask(vcpu); |
| 2196 | if (change) { |
| 2197 | v = cur ^ change; |
| 2198 | write_gc0_config(v); |
| 2199 | } |
| 2200 | break; |
| 2201 | case KVM_REG_MIPS_CP0_CONFIG1: |
| 2202 | if (!cpu_guest_has_conf1) |
| 2203 | break; |
| 2204 | cur = read_gc0_config1(); |
| 2205 | change = (cur ^ v) & kvm_vz_config1_user_wrmask(vcpu); |
| 2206 | if (change) { |
| 2207 | v = cur ^ change; |
| 2208 | write_gc0_config1(v); |
| 2209 | } |
| 2210 | break; |
| 2211 | case KVM_REG_MIPS_CP0_CONFIG2: |
| 2212 | if (!cpu_guest_has_conf2) |
| 2213 | break; |
| 2214 | cur = read_gc0_config2(); |
| 2215 | change = (cur ^ v) & kvm_vz_config2_user_wrmask(vcpu); |
| 2216 | if (change) { |
| 2217 | v = cur ^ change; |
| 2218 | write_gc0_config2(v); |
| 2219 | } |
| 2220 | break; |
| 2221 | case KVM_REG_MIPS_CP0_CONFIG3: |
| 2222 | if (!cpu_guest_has_conf3) |
| 2223 | break; |
| 2224 | cur = read_gc0_config3(); |
| 2225 | change = (cur ^ v) & kvm_vz_config3_user_wrmask(vcpu); |
| 2226 | if (change) { |
| 2227 | v = cur ^ change; |
| 2228 | write_gc0_config3(v); |
| 2229 | } |
| 2230 | break; |
| 2231 | case KVM_REG_MIPS_CP0_CONFIG4: |
| 2232 | if (!cpu_guest_has_conf4) |
| 2233 | break; |
| 2234 | cur = read_gc0_config4(); |
| 2235 | change = (cur ^ v) & kvm_vz_config4_user_wrmask(vcpu); |
| 2236 | if (change) { |
| 2237 | v = cur ^ change; |
| 2238 | write_gc0_config4(v); |
| 2239 | } |
| 2240 | break; |
| 2241 | case KVM_REG_MIPS_CP0_CONFIG5: |
| 2242 | if (!cpu_guest_has_conf5) |
| 2243 | break; |
| 2244 | cur = read_gc0_config5(); |
| 2245 | change = (cur ^ v) & kvm_vz_config5_user_wrmask(vcpu); |
| 2246 | if (change) { |
| 2247 | v = cur ^ change; |
| 2248 | write_gc0_config5(v); |
| 2249 | } |
| 2250 | break; |
James Hogan | d42a008 | 2017-03-14 10:15:38 +0000 | [diff] [blame] | 2251 | case KVM_REG_MIPS_CP0_MAAR(0) ... KVM_REG_MIPS_CP0_MAAR(0x3f): |
| 2252 | if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) |
| 2253 | return -EINVAL; |
| 2254 | idx = reg->id - KVM_REG_MIPS_CP0_MAAR(0); |
| 2255 | if (idx >= ARRAY_SIZE(vcpu->arch.maar)) |
| 2256 | return -EINVAL; |
| 2257 | vcpu->arch.maar[idx] = mips_process_maar(dmtc_op, v); |
| 2258 | break; |
| 2259 | case KVM_REG_MIPS_CP0_MAARI: |
| 2260 | if (!cpu_guest_has_maar || cpu_guest_has_dyn_maar) |
| 2261 | return -EINVAL; |
| 2262 | kvm_write_maari(vcpu, v); |
| 2263 | break; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2264 | #ifdef CONFIG_64BIT |
| 2265 | case KVM_REG_MIPS_CP0_XCONTEXT: |
| 2266 | write_gc0_xcontext(v); |
| 2267 | break; |
| 2268 | #endif |
| 2269 | case KVM_REG_MIPS_CP0_ERROREPC: |
| 2270 | write_gc0_errorepc(v); |
| 2271 | break; |
| 2272 | case KVM_REG_MIPS_CP0_KSCRATCH1 ... KVM_REG_MIPS_CP0_KSCRATCH6: |
| 2273 | idx = reg->id - KVM_REG_MIPS_CP0_KSCRATCH1 + 2; |
| 2274 | if (!cpu_guest_has_kscr(idx)) |
| 2275 | return -EINVAL; |
| 2276 | switch (idx) { |
| 2277 | case 2: |
| 2278 | write_gc0_kscratch1(v); |
| 2279 | break; |
| 2280 | case 3: |
| 2281 | write_gc0_kscratch2(v); |
| 2282 | break; |
| 2283 | case 4: |
| 2284 | write_gc0_kscratch3(v); |
| 2285 | break; |
| 2286 | case 5: |
| 2287 | write_gc0_kscratch4(v); |
| 2288 | break; |
| 2289 | case 6: |
| 2290 | write_gc0_kscratch5(v); |
| 2291 | break; |
| 2292 | case 7: |
| 2293 | write_gc0_kscratch6(v); |
| 2294 | break; |
| 2295 | } |
| 2296 | break; |
| 2297 | case KVM_REG_MIPS_COUNT_CTL: |
| 2298 | ret = kvm_mips_set_count_ctl(vcpu, v); |
| 2299 | break; |
| 2300 | case KVM_REG_MIPS_COUNT_RESUME: |
| 2301 | ret = kvm_mips_set_count_resume(vcpu, v); |
| 2302 | break; |
| 2303 | case KVM_REG_MIPS_COUNT_HZ: |
| 2304 | ret = kvm_mips_set_count_hz(vcpu, v); |
| 2305 | break; |
| 2306 | default: |
| 2307 | return -EINVAL; |
| 2308 | } |
| 2309 | return ret; |
| 2310 | } |
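| | /* |
| | * kvm_vz_get_one_reg() and kvm_vz_set_one_reg() sit behind the generic |
| | * KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu ioctls. A hedged userspace |
| | * sketch (standalone userspace code, kept in a comment since it does |
| | * not belong in this file; vcpu_fd is assumed open and the 32-bit |
| | * KVM_REG_MIPS_CP0_COUNT register id is assumed to be constructed or |
| | * imported by userspace): |
| | * |
| | * #include <linux/kvm.h> |
| | * #include <stdint.h> |
| | * #include <sys/ioctl.h> |
| | * |
| | * static int bump_guest_count(int vcpu_fd) |
| | * { |
| | * uint32_t count; |
| | * struct kvm_one_reg reg = { |
| | * .id = KVM_REG_MIPS_CP0_COUNT, |
| | * .addr = (uintptr_t)&count, |
| | * }; |
| | * |
| | * if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0) |
| | * return -1; |
| | * count += 1000; // nudge the guest timer forward |
| | * return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg); |
| | * } |
| | */ |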
| 2311 | |
| 2312 | #define guestid_cache(cpu) (cpu_data[cpu].guestid_cache) |
| 2313 | static void kvm_vz_get_new_guestid(unsigned long cpu, struct kvm_vcpu *vcpu) |
| 2314 | { |
| 2315 | unsigned long guestid = guestid_cache(cpu); |
| 2316 | |
| 2317 | if (!(++guestid & GUESTID_MASK)) { |
| 2318 | if (cpu_has_vtag_icache) |
| 2319 | flush_icache_all(); |
| 2320 | |
| 2321 | if (!guestid) /* fix version if needed */ |
| 2322 | guestid = GUESTID_FIRST_VERSION; |
| 2323 | |
| 2324 | ++guestid; /* guestid 0 reserved for root */ |
| 2325 | |
| 2326 | /* start new guestid cycle */ |
| 2327 | kvm_vz_local_flush_roottlb_all_guests(); |
| 2328 | kvm_vz_local_flush_guesttlb_all(); |
| 2329 | } |
| 2330 | |
| 2331 | guestid_cache(cpu) = guestid; |
| 2332 | } |
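| | /* |
| | * A worked example of the allocator above, assuming an 8-bit GuestID |
| | * field (GUESTID_MASK = 0xff, GUESTID_FIRST_VERSION = 0x100): the low |
| | * byte is handed out in sequence while the upper bits act as a |
| | * generation number. Incrementing 0x1ff wraps the low byte to zero, so |
| | * guestid becomes 0x200, is bumped to 0x201 to keep id 0 reserved for |
| | * root, and both TLBs are flushed to retire every id of the old |
| | * generation at once. Only a full overflow of the whole counter falls |
| | * back to GUESTID_FIRST_VERSION. |
| | */ |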
| 2333 | |
| 2334 | /* Returns 1 if the guest TLB may be clobbered */ |
| 2335 | static int kvm_vz_check_requests(struct kvm_vcpu *vcpu, int cpu) |
| 2336 | { |
| 2337 | int ret = 0; |
| 2338 | int i; |
| 2339 | |
| 2340 | if (!vcpu->requests) |
| 2341 | return 0; |
| 2342 | |
| 2343 | if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) { |
| 2344 | if (cpu_has_guestid) { |
| 2345 | /* Drop all GuestIDs for this VCPU */ |
| 2346 | for_each_possible_cpu(i) |
| 2347 | vcpu->arch.vzguestid[i] = 0; |
| 2348 | /* This will clobber guest TLB contents too */ |
| 2349 | ret = 1; |
| 2350 | } |
| 2351 | /* |
| 2352 | * For Root ASID Dealias (RAD) we don't do anything here, but we |
| 2353 | * still need the request to ensure we recheck asid_flush_mask. |
| 2354 | * We can still return 0 as only the root TLB will be affected |
| 2355 | * by a root ASID flush. |
| 2356 | */ |
| 2357 | } |
| 2358 | |
| 2359 | return ret; |
| 2360 | } |
| 2361 | |
| 2362 | static void kvm_vz_vcpu_save_wired(struct kvm_vcpu *vcpu) |
| 2363 | { |
| 2364 | unsigned int wired = read_gc0_wired(); |
| 2365 | struct kvm_mips_tlb *tlbs; |
| 2366 | int i; |
| 2367 | |
| 2368 | /* Expand the wired TLB array if necessary */ |
| 2369 | wired &= MIPSR6_WIRED_WIRED; |
| 2370 | if (wired > vcpu->arch.wired_tlb_limit) { |
| 2371 | tlbs = krealloc(vcpu->arch.wired_tlb, wired * |
| 2372 | sizeof(*vcpu->arch.wired_tlb), GFP_ATOMIC); |
| 2373 | if (WARN_ON(!tlbs)) { |
| 2374 | /* Save whatever we can */ |
| 2375 | wired = vcpu->arch.wired_tlb_limit; |
| 2376 | } else { |
| 2377 | vcpu->arch.wired_tlb = tlbs; |
| 2378 | vcpu->arch.wired_tlb_limit = wired; |
| 2379 | } |
| 2380 | } |
| 2381 | |
| 2382 | if (wired) |
| 2383 | /* Save wired entries from the guest TLB */ |
| 2384 | kvm_vz_save_guesttlb(vcpu->arch.wired_tlb, 0, wired); |
| 2385 | /* Invalidate any dropped entries since last time */ |
| 2386 | for (i = wired; i < vcpu->arch.wired_tlb_used; ++i) { |
| 2387 | vcpu->arch.wired_tlb[i].tlb_hi = UNIQUE_GUEST_ENTRYHI(i); |
| 2388 | vcpu->arch.wired_tlb[i].tlb_lo[0] = 0; |
| 2389 | vcpu->arch.wired_tlb[i].tlb_lo[1] = 0; |
| 2390 | vcpu->arch.wired_tlb[i].tlb_mask = 0; |
| 2391 | } |
| 2392 | vcpu->arch.wired_tlb_used = wired; |
| 2393 | } |
| 2394 | |
| 2395 | static void kvm_vz_vcpu_load_wired(struct kvm_vcpu *vcpu) |
| 2396 | { |
| 2397 | /* Load wired entries into the guest TLB */ |
| 2398 | if (vcpu->arch.wired_tlb) |
| 2399 | kvm_vz_load_guesttlb(vcpu->arch.wired_tlb, 0, |
| 2400 | vcpu->arch.wired_tlb_used); |
| 2401 | } |
| 2402 | |
| 2403 | static void kvm_vz_vcpu_load_tlb(struct kvm_vcpu *vcpu, int cpu) |
| 2404 | { |
| 2405 | struct kvm *kvm = vcpu->kvm; |
| 2406 | struct mm_struct *gpa_mm = &kvm->arch.gpa_mm; |
| 2407 | bool migrated; |
| 2408 | |
| 2409 | /* |
| 2410 | * Are we entering guest context on a different CPU to last time? |
| 2411 | * If so, the VCPU's guest TLB state on this CPU may be stale. |
| 2412 | */ |
| 2413 | migrated = (vcpu->arch.last_exec_cpu != cpu); |
| 2414 | vcpu->arch.last_exec_cpu = cpu; |
| 2415 | |
| 2416 | /* |
| 2417 | * A VCPU's GuestID is set in GuestCtl1.ID when the VCPU is loaded and |
| 2418 | * remains set until another VCPU is loaded. As a rule, GuestRID |
| 2419 | * remains zeroed when in root context unless the kernel is busy |
| 2420 | * manipulating guest TLB entries. |
| 2421 | */ |
| 2422 | if (cpu_has_guestid) { |
| 2423 | /* |
| 2424 | * Check if our GuestID is of an older version and thus invalid. |
| 2425 | * |
| 2426 | * We also discard the stored GuestID if we've executed on |
| 2427 | * another CPU, as the guest mappings may have changed without |
| 2428 | * hypervisor knowledge. |
| 2429 | */ |
| 2430 | if (migrated || |
| 2431 | (vcpu->arch.vzguestid[cpu] ^ guestid_cache(cpu)) & |
| 2432 | GUESTID_VERSION_MASK) { |
| 2433 | kvm_vz_get_new_guestid(cpu, vcpu); |
| 2434 | vcpu->arch.vzguestid[cpu] = guestid_cache(cpu); |
| 2435 | trace_kvm_guestid_change(vcpu, |
| 2436 | vcpu->arch.vzguestid[cpu]); |
| 2437 | } |
| 2438 | |
| 2439 | /* Restore GuestID */ |
| 2440 | change_c0_guestctl1(GUESTID_MASK, vcpu->arch.vzguestid[cpu]); |
| 2441 | } else { |
| 2442 | /* |
| 2443 | * The Guest TLB only stores a single guest's TLB state, so |
| 2444 | * flush it if another VCPU has executed on this CPU. |
| 2445 | * |
| 2446 | * We also flush if we've executed on another CPU, as the guest |
| 2447 | * mappings may have changed without hypervisor knowledge. |
| 2448 | */ |
| 2449 | if (migrated || last_exec_vcpu[cpu] != vcpu) |
| 2450 | kvm_vz_local_flush_guesttlb_all(); |
| 2451 | last_exec_vcpu[cpu] = vcpu; |
| 2452 | |
| 2453 | /* |
| 2454 | * Root ASID dealiases guest GPA mappings in the root TLB. |
| 2455 | * Allocate new root ASID if needed. |
| 2456 | */ |
| 2457 | if (cpumask_test_and_clear_cpu(cpu, &kvm->arch.asid_flush_mask) |
| 2458 | || (cpu_context(cpu, gpa_mm) ^ asid_cache(cpu)) & |
| 2459 | asid_version_mask(cpu)) |
| 2460 | get_new_mmu_context(gpa_mm, cpu); |
| 2461 | } |
| 2462 | } |
| 2463 | |
| 2464 | static int kvm_vz_vcpu_load(struct kvm_vcpu *vcpu, int cpu) |
| 2465 | { |
| 2466 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
| 2467 | bool migrated, all; |
| 2468 | |
| 2469 | /* |
| 2470 | * Have we migrated to a different CPU? |
| 2471 | * If so, any old guest TLB state may be stale. |
| 2472 | */ |
| 2473 | migrated = (vcpu->arch.last_sched_cpu != cpu); |
| 2474 | |
| 2475 | /* |
| 2476 | * Was this the last VCPU to run on this CPU? |
| 2477 | * If not, any old guest state from this VCPU will have been clobbered. |
| 2478 | */ |
| 2479 | all = migrated || (last_vcpu[cpu] != vcpu); |
| 2480 | last_vcpu[cpu] = vcpu; |
| 2481 | |
| 2482 | /* |
| 2483 | * Restore CP0_Wired unconditionally as we clear it after use, and |
| 2484 | * restore wired guest TLB entries (while in guest context). |
| 2485 | */ |
| 2486 | kvm_restore_gc0_wired(cop0); |
| 2487 | if (current->flags & PF_VCPU) { |
| 2488 | tlbw_use_hazard(); |
| 2489 | kvm_vz_vcpu_load_tlb(vcpu, cpu); |
| 2490 | kvm_vz_vcpu_load_wired(vcpu); |
| 2491 | } |
| 2492 | |
| 2493 | /* |
| 2494 | * Restore timer state regardless, as e.g. Cause.TI can change over time |
| 2495 | * if left unmaintained. |
| 2496 | */ |
| 2497 | kvm_vz_restore_timer(vcpu); |
| 2498 | |
James Hogan | edec9d7 | 2017-03-14 10:15:40 +0000 | [diff] [blame] | 2499 | /* Set MC bit if we want to trace guest mode changes */ |
| 2500 | if (kvm_trace_guest_mode_change) |
| 2501 | set_c0_guestctl0(MIPS_GCTL0_MC); |
| 2502 | else |
| 2503 | clear_c0_guestctl0(MIPS_GCTL0_MC); |
| 2504 | |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2505 | /* Don't bother restoring registers multiple times unless necessary */ |
| 2506 | if (!all) |
| 2507 | return 0; |
| 2508 | |
| 2509 | /* |
| 2510 | * Restore config registers first, as some implementations restrict |
| 2511 | * writes to other registers when the corresponding feature bits aren't |
| 2512 | * set. For example Status.CU1 cannot be set unless Config1.FP is set. |
| 2513 | */ |
| 2514 | kvm_restore_gc0_config(cop0); |
| 2515 | if (cpu_guest_has_conf1) |
| 2516 | kvm_restore_gc0_config1(cop0); |
| 2517 | if (cpu_guest_has_conf2) |
| 2518 | kvm_restore_gc0_config2(cop0); |
| 2519 | if (cpu_guest_has_conf3) |
| 2520 | kvm_restore_gc0_config3(cop0); |
| 2521 | if (cpu_guest_has_conf4) |
| 2522 | kvm_restore_gc0_config4(cop0); |
| 2523 | if (cpu_guest_has_conf5) |
| 2524 | kvm_restore_gc0_config5(cop0); |
| 2525 | if (cpu_guest_has_conf6) |
| 2526 | kvm_restore_gc0_config6(cop0); |
| 2527 | if (cpu_guest_has_conf7) |
| 2528 | kvm_restore_gc0_config7(cop0); |
| 2529 | |
| 2530 | kvm_restore_gc0_index(cop0); |
| 2531 | kvm_restore_gc0_entrylo0(cop0); |
| 2532 | kvm_restore_gc0_entrylo1(cop0); |
| 2533 | kvm_restore_gc0_context(cop0); |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | [diff] [blame] | 2534 | if (cpu_guest_has_contextconfig) |
| 2535 | kvm_restore_gc0_contextconfig(cop0); |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2536 | #ifdef CONFIG_64BIT |
| 2537 | kvm_restore_gc0_xcontext(cop0); |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | [diff] [blame] | 2538 | if (cpu_guest_has_contextconfig) |
| 2539 | kvm_restore_gc0_xcontextconfig(cop0); |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2540 | #endif |
| 2541 | kvm_restore_gc0_pagemask(cop0); |
| 2542 | kvm_restore_gc0_pagegrain(cop0); |
| 2543 | kvm_restore_gc0_hwrena(cop0); |
| 2544 | kvm_restore_gc0_badvaddr(cop0); |
| 2545 | kvm_restore_gc0_entryhi(cop0); |
| 2546 | kvm_restore_gc0_status(cop0); |
| 2547 | kvm_restore_gc0_intctl(cop0); |
| 2548 | kvm_restore_gc0_epc(cop0); |
| 2549 | kvm_vz_write_gc0_ebase(kvm_read_sw_gc0_ebase(cop0)); |
| 2550 | if (cpu_guest_has_userlocal) |
| 2551 | kvm_restore_gc0_userlocal(cop0); |
| 2552 | |
| 2553 | kvm_restore_gc0_errorepc(cop0); |
| 2554 | |
| 2555 | /* restore KScratch registers if enabled in guest */ |
| 2556 | if (cpu_guest_has_conf4) { |
| 2557 | if (cpu_guest_has_kscr(2)) |
| 2558 | kvm_restore_gc0_kscratch1(cop0); |
| 2559 | if (cpu_guest_has_kscr(3)) |
| 2560 | kvm_restore_gc0_kscratch2(cop0); |
| 2561 | if (cpu_guest_has_kscr(4)) |
| 2562 | kvm_restore_gc0_kscratch3(cop0); |
| 2563 | if (cpu_guest_has_kscr(5)) |
| 2564 | kvm_restore_gc0_kscratch4(cop0); |
| 2565 | if (cpu_guest_has_kscr(6)) |
| 2566 | kvm_restore_gc0_kscratch5(cop0); |
| 2567 | if (cpu_guest_has_kscr(7)) |
| 2568 | kvm_restore_gc0_kscratch6(cop0); |
| 2569 | } |
| 2570 | |
James Hogan | edc8926 | 2017-03-14 10:15:33 +0000 | [diff] [blame] | 2571 | if (cpu_guest_has_badinstr) |
| 2572 | kvm_restore_gc0_badinstr(cop0); |
| 2573 | if (cpu_guest_has_badinstrp) |
| 2574 | kvm_restore_gc0_badinstrp(cop0); |
| 2575 | |
James Hogan | 4b7de02 | 2017-03-14 10:15:35 +0000 | [diff] [blame] | 2576 | if (cpu_guest_has_segments) { |
| 2577 | kvm_restore_gc0_segctl0(cop0); |
| 2578 | kvm_restore_gc0_segctl1(cop0); |
| 2579 | kvm_restore_gc0_segctl2(cop0); |
| 2580 | } |
| 2581 | |
James Hogan | 5a2f352 | 2017-03-14 10:15:36 +0000 | [diff] [blame] | 2582 | /* restore HTW registers */ |
| 2583 | if (cpu_guest_has_htw) { |
| 2584 | kvm_restore_gc0_pwbase(cop0); |
| 2585 | kvm_restore_gc0_pwfield(cop0); |
| 2586 | kvm_restore_gc0_pwsize(cop0); |
| 2587 | kvm_restore_gc0_pwctl(cop0); |
| 2588 | } |
| 2589 | |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2590 | /* restore Root.GuestCtl2 from unused Guest guestctl2 register */ |
| 2591 | if (cpu_has_guestctl2) |
| 2592 | write_c0_guestctl2( |
| 2593 | cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL]); |
| 2594 | |
James Hogan | 273819a6 | 2017-03-14 10:15:37 +0000 | [diff] [blame] | 2595 | /* |
| 2596 | * We should clear the linked load bit to break interrupted atomics. This |
| 2597 | * prevents an SC on the next VCPU from succeeding by matching an LL on |
| 2598 | * the previous VCPU. |
| 2599 | */ |
| 2600 | if (cpu_guest_has_rw_llb) |
| 2601 | write_gc0_lladdr(0); |
| 2602 | |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2603 | return 0; |
| 2604 | } |
| 2605 | |
| 2606 | static int kvm_vz_vcpu_put(struct kvm_vcpu *vcpu, int cpu) |
| 2607 | { |
| 2608 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
| 2609 | |
| 2610 | if (current->flags & PF_VCPU) |
| 2611 | kvm_vz_vcpu_save_wired(vcpu); |
| 2612 | |
| 2613 | kvm_lose_fpu(vcpu); |
| 2614 | |
| 2615 | kvm_save_gc0_index(cop0); |
| 2616 | kvm_save_gc0_entrylo0(cop0); |
| 2617 | kvm_save_gc0_entrylo1(cop0); |
| 2618 | kvm_save_gc0_context(cop0); |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | [diff] [blame] | 2619 | if (cpu_guest_has_contextconfig) |
| 2620 | kvm_save_gc0_contextconfig(cop0); |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2621 | #ifdef CONFIG_64BIT |
| 2622 | kvm_save_gc0_xcontext(cop0); |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | [diff] [blame] | 2623 | if (cpu_guest_has_contextconfig) |
| 2624 | kvm_save_gc0_xcontextconfig(cop0); |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2625 | #endif |
| 2626 | kvm_save_gc0_pagemask(cop0); |
| 2627 | kvm_save_gc0_pagegrain(cop0); |
| 2628 | kvm_save_gc0_wired(cop0); |
| 2629 | /* allow wired TLB entries to be overwritten */ |
| 2630 | clear_gc0_wired(MIPSR6_WIRED_WIRED); |
| 2631 | kvm_save_gc0_hwrena(cop0); |
| 2632 | kvm_save_gc0_badvaddr(cop0); |
| 2633 | kvm_save_gc0_entryhi(cop0); |
| 2634 | kvm_save_gc0_status(cop0); |
| 2635 | kvm_save_gc0_intctl(cop0); |
| 2636 | kvm_save_gc0_epc(cop0); |
| 2637 | kvm_write_sw_gc0_ebase(cop0, kvm_vz_read_gc0_ebase()); |
| 2638 | if (cpu_guest_has_userlocal) |
| 2639 | kvm_save_gc0_userlocal(cop0); |
| 2640 | |
| 2641 | /* only save implemented config registers */ |
| 2642 | kvm_save_gc0_config(cop0); |
| 2643 | if (cpu_guest_has_conf1) |
| 2644 | kvm_save_gc0_config1(cop0); |
| 2645 | if (cpu_guest_has_conf2) |
| 2646 | kvm_save_gc0_config2(cop0); |
| 2647 | if (cpu_guest_has_conf3) |
| 2648 | kvm_save_gc0_config3(cop0); |
| 2649 | if (cpu_guest_has_conf4) |
| 2650 | kvm_save_gc0_config4(cop0); |
| 2651 | if (cpu_guest_has_conf5) |
| 2652 | kvm_save_gc0_config5(cop0); |
| 2653 | if (cpu_guest_has_conf6) |
| 2654 | kvm_save_gc0_config6(cop0); |
| 2655 | if (cpu_guest_has_conf7) |
| 2656 | kvm_save_gc0_config7(cop0); |
| 2657 | |
| 2658 | kvm_save_gc0_errorepc(cop0); |
| 2659 | |
| 2660 | /* save KScratch registers if enabled in guest */ |
| 2661 | if (cpu_guest_has_conf4) { |
| 2662 | if (cpu_guest_has_kscr(2)) |
| 2663 | kvm_save_gc0_kscratch1(cop0); |
| 2664 | if (cpu_guest_has_kscr(3)) |
| 2665 | kvm_save_gc0_kscratch2(cop0); |
| 2666 | if (cpu_guest_has_kscr(4)) |
| 2667 | kvm_save_gc0_kscratch3(cop0); |
| 2668 | if (cpu_guest_has_kscr(5)) |
| 2669 | kvm_save_gc0_kscratch4(cop0); |
| 2670 | if (cpu_guest_has_kscr(6)) |
| 2671 | kvm_save_gc0_kscratch5(cop0); |
| 2672 | if (cpu_guest_has_kscr(7)) |
| 2673 | kvm_save_gc0_kscratch6(cop0); |
| 2674 | } |
| 2675 | |
James Hogan | edc8926 | 2017-03-14 10:15:33 +0000 | [diff] [blame] | 2676 | if (cpu_guest_has_badinstr) |
| 2677 | kvm_save_gc0_badinstr(cop0); |
| 2678 | if (cpu_guest_has_badinstrp) |
| 2679 | kvm_save_gc0_badinstrp(cop0); |
| 2680 | |
James Hogan | 4b7de02 | 2017-03-14 10:15:35 +0000 | [diff] [blame] | 2681 | if (cpu_guest_has_segments) { |
| 2682 | kvm_save_gc0_segctl0(cop0); |
| 2683 | kvm_save_gc0_segctl1(cop0); |
| 2684 | kvm_save_gc0_segctl2(cop0); |
| 2685 | } |
| 2686 | |
James Hogan | 5a2f352 | 2017-03-14 10:15:36 +0000 | [diff] [blame] | 2687 | /* save HTW registers if enabled in guest */ |
| 2688 | if (cpu_guest_has_htw && |
| 2689 | kvm_read_sw_gc0_config3(cop0) & MIPS_CONF3_PW) { |
| 2690 | kvm_save_gc0_pwbase(cop0); |
| 2691 | kvm_save_gc0_pwfield(cop0); |
| 2692 | kvm_save_gc0_pwsize(cop0); |
| 2693 | kvm_save_gc0_pwctl(cop0); |
| 2694 | } |
| 2695 | |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2696 | kvm_vz_save_timer(vcpu); |
| 2697 | |
| 2698 | /* save Root.GuestCtl2 in unused Guest guestctl2 register */ |
| 2699 | if (cpu_has_guestctl2) |
| 2700 | cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = |
| 2701 | read_c0_guestctl2(); |
| 2702 | |
| 2703 | return 0; |
| 2704 | } |
| 2705 | |
| 2706 | /** |
| 2707 | * kvm_vz_resize_guest_vtlb() - Attempt to resize guest VTLB. |
| 2708 | * @size: Number of guest VTLB entries (0 < @size <= root VTLB entries). |
| 2709 | * |
| 2710 | * Attempt to resize the guest VTLB by writing guest Config registers. This is |
| 2711 | * necessary for cores with a shared root/guest TLB to avoid overlap with wired |
| 2712 | * entries in the root VTLB. |
| 2713 | * |
| 2714 | * Returns: The resulting guest VTLB size. |
| 2715 | */ |
| 2716 | static unsigned int kvm_vz_resize_guest_vtlb(unsigned int size) |
| 2717 | { |
| 2718 | unsigned int config4 = 0, ret = 0, limit; |
| 2719 | |
| 2720 | /* Write MMUSize - 1 into guest Config registers */ |
| 2721 | if (cpu_guest_has_conf1) |
| 2722 | change_gc0_config1(MIPS_CONF1_TLBS, |
| 2723 | (size - 1) << MIPS_CONF1_TLBS_SHIFT); |
| 2724 | if (cpu_guest_has_conf4) { |
| 2725 | config4 = read_gc0_config4(); |
| 2726 | if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) == |
| 2727 | MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) { |
| 2728 | config4 &= ~MIPS_CONF4_VTLBSIZEEXT; |
| 2729 | config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << |
| 2730 | MIPS_CONF4_VTLBSIZEEXT_SHIFT; |
| 2731 | } else if ((config4 & MIPS_CONF4_MMUEXTDEF) == |
| 2732 | MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) { |
| 2733 | config4 &= ~MIPS_CONF4_MMUSIZEEXT; |
| 2734 | config4 |= ((size - 1) >> MIPS_CONF1_TLBS_SIZE) << |
| 2735 | MIPS_CONF4_MMUSIZEEXT_SHIFT; |
| 2736 | } |
| 2737 | write_gc0_config4(config4); |
| 2738 | } |
| 2739 | |
| 2740 | /* |
| 2741 | * Set Guest.Wired.Limit = 0 (no limit up to Guest.MMUSize-1), unless it |
| 2742 | * would exceed Root.Wired.Limit (clearing Guest.Wired.Wired so the |
| 2743 | * write is not dropped). |
| 2744 | */ |
| 2745 | if (cpu_has_mips_r6) { |
| 2746 | limit = (read_c0_wired() & MIPSR6_WIRED_LIMIT) >> |
| 2747 | MIPSR6_WIRED_LIMIT_SHIFT; |
| 2748 | if (size - 1 <= limit) |
| 2749 | limit = 0; |
| 2750 | write_gc0_wired(limit << MIPSR6_WIRED_LIMIT_SHIFT); |
| 2751 | } |
| 2752 | |
| 2753 | /* Read back MMUSize - 1 */ |
| 2754 | back_to_back_c0_hazard(); |
| 2755 | if (cpu_guest_has_conf1) |
| 2756 | ret = (read_gc0_config1() & MIPS_CONF1_TLBS) >> |
| 2757 | MIPS_CONF1_TLBS_SHIFT; |
| 2758 | if (config4) { |
| 2759 | if (cpu_has_mips_r6 || (config4 & MIPS_CONF4_MMUEXTDEF) == |
| 2760 | MIPS_CONF4_MMUEXTDEF_VTLBSIZEEXT) |
| 2761 | ret |= ((config4 & MIPS_CONF4_VTLBSIZEEXT) >> |
| 2762 | MIPS_CONF4_VTLBSIZEEXT_SHIFT) << |
| 2763 | MIPS_CONF1_TLBS_SIZE; |
| 2764 | else if ((config4 & MIPS_CONF4_MMUEXTDEF) == |
| 2765 | MIPS_CONF4_MMUEXTDEF_MMUSIZEEXT) |
| 2766 | ret |= ((config4 & MIPS_CONF4_MMUSIZEEXT) >> |
| 2767 | MIPS_CONF4_MMUSIZEEXT_SHIFT) << |
| 2768 | MIPS_CONF1_TLBS_SIZE; |
| 2769 | } |
| 2770 | return ret + 1; |
| 2771 | } |
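| | /* |
| | * A worked example of the encoding above, assuming a 128-entry guest |
| | * VTLB on an R6 core: MMUSize - 1 = 127. Config1.MMUSize keeps the low |
| | * 6 bits (63) and Config4.VTLBSizeExt the remainder (127 >> 6 = 1), so |
| | * the read-back computes (1 << 6) | 63 = 127 and the function returns |
| | * 127 + 1 = 128. |
| | */ |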
| 2772 | |
| 2773 | static int kvm_vz_hardware_enable(void) |
| 2774 | { |
| 2775 | unsigned int mmu_size, guest_mmu_size, ftlb_size; |
James Hogan | 824533a | 2017-03-14 10:25:48 +0000 | [diff] [blame] | 2776 | u64 guest_cvmctl, cvmvmconfig; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2777 | |
James Hogan | 824533a | 2017-03-14 10:25:48 +0000 | [diff] [blame] | 2778 | switch (current_cpu_type()) { |
| 2779 | case CPU_CAVIUM_OCTEON3: |
| 2780 | /* Set up guest timer/perfcount IRQ lines */ |
| 2781 | guest_cvmctl = read_gc0_cvmctl(); |
| 2782 | guest_cvmctl &= ~CVMCTL_IPTI; |
| 2783 | guest_cvmctl |= 7ull << CVMCTL_IPTI_SHIFT; |
| 2784 | guest_cvmctl &= ~CVMCTL_IPPCI; |
| 2785 | guest_cvmctl |= 6ull << CVMCTL_IPPCI_SHIFT; |
| 2786 | write_gc0_cvmctl(guest_cvmctl); |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2787 | |
James Hogan | 824533a | 2017-03-14 10:25:48 +0000 | [diff] [blame] | 2788 | cvmvmconfig = read_c0_cvmvmconfig(); |
| 2789 | /* No I/O hole translation. */ |
| 2790 | cvmvmconfig |= CVMVMCONF_DGHT; |
| 2791 | /* Halve the root MMU size */ |
| 2792 | mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1) |
| 2793 | >> CVMVMCONF_MMUSIZEM1_S) + 1; |
| 2794 | guest_mmu_size = mmu_size / 2; |
| 2795 | mmu_size -= guest_mmu_size; |
| 2796 | cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1; |
| 2797 | cvmvmconfig |= mmu_size - 1; |
| 2798 | write_c0_cvmvmconfig(cvmvmconfig); |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2799 | |
James Hogan | 824533a | 2017-03-14 10:25:48 +0000 | [diff] [blame] | 2800 | /* Update our records */ |
| 2801 | current_cpu_data.tlbsize = mmu_size; |
| 2802 | current_cpu_data.tlbsizevtlb = mmu_size; |
| 2803 | current_cpu_data.guest.tlbsize = guest_mmu_size; |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2804 | |
James Hogan | 824533a | 2017-03-14 10:25:48 +0000 | [diff] [blame] | 2805 | /* Flush moved entries in new (guest) context */ |
| 2806 | kvm_vz_local_flush_guesttlb_all(); |
| 2807 | break; |
| 2808 | default: |
| 2809 | /* |
| 2810 | * ImgTec cores tend to use a shared root/guest TLB. To avoid |
| 2811 | * overlap of root wired and guest entries, the guest TLB may |
| 2812 | * need resizing. |
| 2813 | */ |
| 2814 | mmu_size = current_cpu_data.tlbsizevtlb; |
| 2815 | ftlb_size = current_cpu_data.tlbsize - mmu_size; |
| 2816 | |
| 2817 | /* Try switching to maximum guest VTLB size for flush */ |
| 2818 | guest_mmu_size = kvm_vz_resize_guest_vtlb(mmu_size); |
| 2819 | current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size; |
| 2820 | kvm_vz_local_flush_guesttlb_all(); |
| 2821 | |
| 2822 | /* |
| 2823 | * Reduce to make space for root wired entries and at least 2 |
| 2824 | * root non-wired entries. This does assume that long-term wired |
| 2825 | * entries won't be added later. |
| 2826 | */ |
| 2827 | guest_mmu_size = mmu_size - num_wired_entries() - 2; |
| 2828 | guest_mmu_size = kvm_vz_resize_guest_vtlb(guest_mmu_size); |
| 2829 | current_cpu_data.guest.tlbsize = guest_mmu_size + ftlb_size; |
| 2830 | |
| 2831 | /* |
| 2832 | * Write the VTLB size, but if another CPU has already written, |
| 2833 | * check it matches or we won't provide a consistent view to the |
| 2834 | * guest. If this ever happens it suggests an asymmetric number |
| 2835 | * of wired entries. |
| 2836 | */ |
| 2837 | if (cmpxchg(&kvm_vz_guest_vtlb_size, 0, guest_mmu_size) && |
| 2838 | WARN(guest_mmu_size != kvm_vz_guest_vtlb_size, |
| 2839 | "Available guest VTLB size mismatch")) |
| 2840 | return -EINVAL; |
| 2841 | break; |
| 2842 | } |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2843 | |
| 2844 | /* |
| 2845 | * Enable virtualization features granting guest direct control of |
| 2846 | * certain features: |
| 2847 | * CP0=1: Guest coprocessor 0 context. |
| 2848 | * AT=Guest: Guest MMU. |
| 2849 | * CG=1: Hit (virtual address) CACHE operations (optional). |
| 2850 | * CF=1: Guest Config registers. |
| 2851 | * CGI=1: Indexed flush CACHE operations (optional). |
| 2852 | */ |
| 2853 | write_c0_guestctl0(MIPS_GCTL0_CP0 | |
| 2854 | (MIPS_GCTL0_AT_GUEST << MIPS_GCTL0_AT_SHIFT) | |
| 2855 | MIPS_GCTL0_CG | MIPS_GCTL0_CF); |
| 2856 | if (cpu_has_guestctl0ext) |
| 2857 | set_c0_guestctl0ext(MIPS_GCTL0EXT_CGI); |
| 2858 | |
| 2859 | if (cpu_has_guestid) { |
| 2860 | write_c0_guestctl1(0); |
| 2861 | kvm_vz_local_flush_roottlb_all_guests(); |
| 2862 | |
| 2863 | GUESTID_MASK = current_cpu_data.guestid_mask; |
| 2864 | GUESTID_FIRST_VERSION = GUESTID_MASK + 1; |
| 2865 | GUESTID_VERSION_MASK = ~GUESTID_MASK; |
| 2866 | |
| 2867 | current_cpu_data.guestid_cache = GUESTID_FIRST_VERSION; |
| 2868 | } |
| 2869 | |
| 2870 | /* clear any pending injected virtual guest interrupts */ |
| 2871 | if (cpu_has_guestctl2) |
| 2872 | clear_c0_guestctl2(0x3f << 10); |
| 2873 | |
| 2874 | return 0; |
| 2875 | } |
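| | /* |
| | * A worked example of the Octeon III MMU split above, assuming a |
| | * 256-entry root MMU: mmu_size starts at 256, guest_mmu_size becomes |
| | * 128, and CVMVMCONF.RMMUSIZEM1 is rewritten to 256 - 128 - 1 = 127, |
| | * leaving root with 128 entries. kvm_vz_hardware_disable() below |
| | * reverses the split by handing the full MMU back to root. |
| | */ |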
| 2876 | |
| 2877 | static void kvm_vz_hardware_disable(void) |
| 2878 | { |
James Hogan | 824533a | 2017-03-14 10:25:48 +0000 | [diff] [blame] | 2879 | u64 cvmvmconfig; |
| 2880 | unsigned int mmu_size; |
| 2881 | |
| 2882 | /* Flush any remaining guest TLB entries */ |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2883 | kvm_vz_local_flush_guesttlb_all(); |
| 2884 | |
James Hogan | 824533a | 2017-03-14 10:25:48 +0000 | [diff] [blame] | 2885 | switch (current_cpu_type()) { |
| 2886 | case CPU_CAVIUM_OCTEON3: |
| 2887 | /* |
| 2888 | * Allocate the whole TLB for root. Existing guest TLB entries will |
| 2889 | * change ownership to the root TLB. We should be safe though, as |
| 2890 | * they've already been flushed above while still in the guest TLB. |
| 2891 | */ |
| 2892 | cvmvmconfig = read_c0_cvmvmconfig(); |
| 2893 | mmu_size = ((cvmvmconfig & CVMVMCONF_MMUSIZEM1) |
| 2894 | >> CVMVMCONF_MMUSIZEM1_S) + 1; |
| 2895 | cvmvmconfig &= ~CVMVMCONF_RMMUSIZEM1; |
| 2896 | cvmvmconfig |= mmu_size - 1; |
| 2897 | write_c0_cvmvmconfig(cvmvmconfig); |
| 2898 | |
| 2899 | /* Update our records */ |
| 2900 | current_cpu_data.tlbsize = mmu_size; |
| 2901 | current_cpu_data.tlbsizevtlb = mmu_size; |
| 2902 | current_cpu_data.guest.tlbsize = 0; |
| 2903 | |
| 2904 | /* Flush moved entries in new (root) context */ |
| 2905 | local_flush_tlb_all(); |
| 2906 | break; |
| 2907 | } |
| 2908 | |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | [diff] [blame] | 2909 | if (cpu_has_guestid) { |
| 2910 | write_c0_guestctl1(0); |
| 2911 | kvm_vz_local_flush_roottlb_all_guests(); |
| 2912 | } |
| 2913 | } |
| 2914 | |
| 2915 | static int kvm_vz_check_extension(struct kvm *kvm, long ext) |
| 2916 | { |
| 2917 | int r; |
| 2918 | |
| 2919 | switch (ext) { |
| 2920 | case KVM_CAP_MIPS_VZ: |
| 2921 | /* we wouldn't be here unless cpu_has_vz */ |
| 2922 | r = 1; |
| 2923 | break; |
| 2924 | #ifdef CONFIG_64BIT |
| 2925 | case KVM_CAP_MIPS_64BIT: |
| 2926 | /* We support 64-bit registers/operations and addresses */ |
| 2927 | r = 2; |
| 2928 | break; |
| 2929 | #endif |
| 2930 | default: |
| 2931 | r = 0; |
| 2932 | break; |
| 2933 | } |
| 2934 | |
| 2935 | return r; |
| 2936 | } |
| 2937 | |
| 2938 | static int kvm_vz_vcpu_init(struct kvm_vcpu *vcpu) |
| 2939 | { |
| 2940 | int i; |
| 2941 | |
| 2942 | for_each_possible_cpu(i) |
| 2943 | vcpu->arch.vzguestid[i] = 0; |
| 2944 | |
| 2945 | return 0; |
| 2946 | } |
| 2947 | |
| 2948 | static void kvm_vz_vcpu_uninit(struct kvm_vcpu *vcpu) |
| 2949 | { |
| 2950 | int cpu; |
| 2951 | |
| 2952 | /* |
| 2953 | * If the VCPU is freed and reused as another VCPU, we don't want the |
| 2954 | * matching pointer wrongly hanging around in last_vcpu[] or |
| 2955 | * last_exec_vcpu[]. |
| 2956 | */ |
| 2957 | for_each_possible_cpu(cpu) { |
| 2958 | if (last_vcpu[cpu] == vcpu) |
| 2959 | last_vcpu[cpu] = NULL; |
| 2960 | if (last_exec_vcpu[cpu] == vcpu) |
| 2961 | last_exec_vcpu[cpu] = NULL; |
| 2962 | } |
| 2963 | } |
| 2964 | |
| 2965 | static int kvm_vz_vcpu_setup(struct kvm_vcpu *vcpu) |
| 2966 | { |
| 2967 | struct mips_coproc *cop0 = vcpu->arch.cop0; |
| 2968 | unsigned long count_hz = 100*1000*1000; /* default to 100 MHz */ |
| 2969 | |
| 2970 | /* |
| 2971 | * Start off the timer at the same frequency as the host timer, but the |
| 2972 | * soft timer doesn't handle frequencies greater than 1GHz yet. |
| 2973 | */ |
| 2974 | if (mips_hpt_frequency && mips_hpt_frequency <= NSEC_PER_SEC) |
| 2975 | count_hz = mips_hpt_frequency; |
| 2976 | kvm_mips_init_count(vcpu, count_hz); |
| 2977 | |
| 2978 | /* |
| 2979 | * Initialize guest register state to valid architectural reset state. |
| 2980 | */ |
| 2981 | |
| 2982 | /* PageGrain */ |
| 2983 | if (cpu_has_mips_r6) |
| 2984 | kvm_write_sw_gc0_pagegrain(cop0, PG_RIE | PG_XIE | PG_IEC); |
| 2985 | /* Wired */ |
| 2986 | if (cpu_has_mips_r6) |
| 2987 | kvm_write_sw_gc0_wired(cop0, |
| 2988 | read_gc0_wired() & MIPSR6_WIRED_LIMIT); |
| 2989 | /* Status */ |
| 2990 | kvm_write_sw_gc0_status(cop0, ST0_BEV | ST0_ERL); |
| 2991 | if (cpu_has_mips_r6) |
| 2992 | kvm_change_sw_gc0_status(cop0, ST0_FR, read_gc0_status()); |
| 2993 | /* IntCtl */ |
| 2994 | kvm_write_sw_gc0_intctl(cop0, read_gc0_intctl() & |
| 2995 | (INTCTLF_IPFDC | INTCTLF_IPPCI | INTCTLF_IPTI)); |
| 2996 | /* PRId */ |
| 2997 | kvm_write_sw_gc0_prid(cop0, boot_cpu_data.processor_id); |
| 2998 | /* EBase */ |
| 2999 | kvm_write_sw_gc0_ebase(cop0, (s32)0x80000000 | vcpu->vcpu_id); |
| 3000 | /* Config */ |
| 3001 | kvm_save_gc0_config(cop0); |
| 3002 | /* architecturally writable (e.g. from guest) */ |
| 3003 | kvm_change_sw_gc0_config(cop0, CONF_CM_CMASK, |
| 3004 | _page_cachable_default >> _CACHE_SHIFT); |
| 3005 | /* architecturally read only, but maybe writable from root */ |
| 3006 | kvm_change_sw_gc0_config(cop0, MIPS_CONF_MT, read_c0_config()); |
| 3007 | if (cpu_guest_has_conf1) { |
| 3008 | kvm_set_sw_gc0_config(cop0, MIPS_CONF_M); |
| 3009 | /* Config1 */ |
| 3010 | kvm_save_gc0_config1(cop0); |
| 3011 | /* architecturally read only, but maybe writable from root */ |
| 3012 | kvm_clear_sw_gc0_config1(cop0, MIPS_CONF1_C2 | |
| 3013 | MIPS_CONF1_MD | |
| 3014 | MIPS_CONF1_PC | |
| 3015 | MIPS_CONF1_WR | |
| 3016 | MIPS_CONF1_CA | |
| 3017 | MIPS_CONF1_FP); |
| 3018 | } |
| 3019 | if (cpu_guest_has_conf2) { |
| 3020 | kvm_set_sw_gc0_config1(cop0, MIPS_CONF_M); |
| 3021 | /* Config2 */ |
| 3022 | kvm_save_gc0_config2(cop0); |
| 3023 | } |
| 3024 | if (cpu_guest_has_conf3) { |
| 3025 | kvm_set_sw_gc0_config2(cop0, MIPS_CONF_M); |
| 3026 | /* Config3 */ |
| 3027 | kvm_save_gc0_config3(cop0); |
| 3028 | /* architecturally writable (e.g. from guest) */ |
| 3029 | kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_ISA_OE); |
| 3030 | /* architecturally read-only, but may be writable from root */ |
| 3031 | kvm_clear_sw_gc0_config3(cop0, MIPS_CONF3_MSA | |
| 3032 | MIPS_CONF3_BPG | |
| 3033 | MIPS_CONF3_ULRI | |
| 3034 | MIPS_CONF3_DSP | |
| 3035 | MIPS_CONF3_CTXTC | |
| 3036 | MIPS_CONF3_ITL | |
| 3037 | MIPS_CONF3_LPA | |
| 3038 | MIPS_CONF3_VEIC | |
| 3039 | MIPS_CONF3_VINT | |
| 3040 | MIPS_CONF3_SP | |
| 3041 | MIPS_CONF3_CDMM | |
| 3042 | MIPS_CONF3_MT | |
| 3043 | MIPS_CONF3_SM | |
| 3044 | MIPS_CONF3_TL); |
| 3045 | } |
| 3046 | if (cpu_guest_has_conf4) { |
| 3047 | kvm_set_sw_gc0_config3(cop0, MIPS_CONF_M); |
| 3048 | /* Config4 */ |
| 3049 | kvm_save_gc0_config4(cop0); |
| 3050 | } |
| 3051 | if (cpu_guest_has_conf5) { |
| 3052 | kvm_set_sw_gc0_config4(cop0, MIPS_CONF_M); |
| 3053 | /* Config5 */ |
| 3054 | kvm_save_gc0_config5(cop0); |
| 3055 | /* architecturally writable (e.g. from guest) */ |
| 3056 | kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_K | |
| 3057 | MIPS_CONF5_CV | |
| 3058 | MIPS_CONF5_MSAEN | |
| 3059 | MIPS_CONF5_UFE | |
| 3060 | MIPS_CONF5_FRE | |
| 3061 | MIPS_CONF5_SBRI | |
| 3062 | MIPS_CONF5_UFR); |
| 3063 | /* architecturally read-only, but may be writable from root */ |
| 3064 | kvm_clear_sw_gc0_config5(cop0, MIPS_CONF5_MRP); |
| 3065 | } |
| 3066 | |
James Hogan | dffe042 | 2017-03-14 10:15:34 +0000 | 3067 | if (cpu_guest_has_contextconfig) { |
| 3068 | /* ContextConfig */ |
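| | /* 0x007ffff0 selects the traditional Context/BadVPN2 layout */ |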
| 3069 | kvm_write_sw_gc0_contextconfig(cop0, 0x007ffff0); |
| 3070 | #ifdef CONFIG_64BIT |
| 3071 | /* XContextConfig */ |
| 3072 | /* set bits (SEGBITS-13)+3 .. 4; SEGBITS here comes from cpu_vmbits */ |
| 3073 | kvm_write_sw_gc0_xcontextconfig(cop0, |
| 3074 | ((1ull << (cpu_vmbits - 13)) - 1) << 4); |
| 3075 | #endif |
| 3076 | } |
| 3077 | |
James Hogan | 4b7de02 | 2017-03-14 10:15:35 +0000 | 3078 | /* Implementation dependent, use the legacy layout */ |
| 3079 | if (cpu_guest_has_segments) { |
| 3080 | /* SegCtl0, SegCtl1, SegCtl2 */ |
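| | /* |
| | * A sketch of the decoding: CFG0/1 make kseg3/sseg mapped kernel |
| | * segments, CFG2/3 make kseg1 unmapped uncached and kseg0 unmapped |
| | * default-cacheable, and CFG4/5 make useg mapped with EU set so it |
| | * reverts to unmapped uncached while Status.ERL = 1. |
| | */ |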
| 3081 | kvm_write_sw_gc0_segctl0(cop0, 0x00200010); |
| 3082 | kvm_write_sw_gc0_segctl1(cop0, 0x00000002 | |
| 3083 | (_page_cachable_default >> _CACHE_SHIFT) << |
| 3084 | (16 + MIPS_SEGCFG_C_SHIFT)); |
| 3085 | kvm_write_sw_gc0_segctl2(cop0, 0x00380438); |
| 3086 | } |
| 3087 | |
James Hogan | 5a2f352 | 2017-03-14 10:15:36 +0000 | 3088 | /* reset HTW registers */ |
| 3089 | if (cpu_guest_has_htw && cpu_has_mips_r6) { |
| 3090 | /* PWField */ |
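| | /* 0x0c30c302 is the architected reset: GDI = UDI = MDI = PTI = 12, PTEI = 2 */ |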
| 3091 | kvm_write_sw_gc0_pwfield(cop0, 0x0c30c302); |
| 3092 | /* PWSize */ |
| 3093 | kvm_write_sw_gc0_pwsize(cop0, 1 << MIPS_PWSIZE_PTW_SHIFT); |
| 3094 | } |
| 3095 | |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | 3096 | /* start with no pending virtual guest interrupts */ |
| 3097 | if (cpu_has_guestctl2) |
| 3098 | cop0->reg[MIPS_CP0_GUESTCTL2][MIPS_CP0_GUESTCTL2_SEL] = 0; |
| 3099 | |
| 3100 | /* Put PC at reset vector */ |
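| | /* physical 0x1fc00000, reached through unmapped uncached kseg1 */ |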
| 3101 | vcpu->arch.pc = CKSEG1ADDR(0x1fc00000); |
| 3102 | |
| 3103 | return 0; |
| 3104 | } |
| 3105 | |
| 3106 | static void kvm_vz_flush_shadow_all(struct kvm *kvm) |
| 3107 | { |
| 3108 | if (cpu_has_guestid) { |
| 3109 | /* Flush GuestID for each VCPU individually */ |
| 3110 | kvm_flush_remote_tlbs(kvm); |
| 3111 | } else { |
| 3112 | /* |
| 3113 | * For each CPU there is a single GPA ASID used by all VCPUs in |
| 3114 | * the VM, so it doesn't make sense for the VCPUs to handle |
| 3115 | * invalidation of these ASIDs individually. |
| 3116 | * |
| 3117 | * Instead mark all CPUs as needing ASID invalidation in |
| 3118 | * asid_flush_mask, and just use kvm_flush_remote_tlbs(kvm) to |
| 3119 | * kick any running VCPUs so they check asid_flush_mask. |
| 3120 | */ |
| 3121 | cpumask_setall(&kvm->arch.asid_flush_mask); |
| 3122 | kvm_flush_remote_tlbs(kvm); |
| 3123 | } |
| 3124 | } |
| 3125 | |
| 3126 | static void kvm_vz_flush_shadow_memslot(struct kvm *kvm, |
| 3127 | const struct kvm_memory_slot *slot) |
| 3128 | { |
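| | /* per-memslot invalidation isn't implemented, so flush everything */ |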
| 3129 | kvm_vz_flush_shadow_all(kvm); |
| 3130 | } |
| 3131 | |
| 3132 | static void kvm_vz_vcpu_reenter(struct kvm_run *run, struct kvm_vcpu *vcpu) |
| 3133 | { |
| 3134 | int cpu = smp_processor_id(); |
| 3135 | int preserve_guest_tlb; |
| 3136 | |
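| | /* |
| | * A nonzero return means the guest TLB mappings are being |
| | * invalidated (e.g. a regenerated GPA ASID), so save the |
| | * root-managed wired entries now and restore them after the reload. |
| | */ |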
| 3137 | preserve_guest_tlb = kvm_vz_check_requests(vcpu, cpu); |
| 3138 | |
| 3139 | if (preserve_guest_tlb) |
| 3140 | kvm_vz_vcpu_save_wired(vcpu); |
| 3141 | |
| 3142 | kvm_vz_vcpu_load_tlb(vcpu, cpu); |
| 3143 | |
| 3144 | if (preserve_guest_tlb) |
| 3145 | kvm_vz_vcpu_load_wired(vcpu); |
| 3146 | } |
| 3147 | |
| 3148 | static int kvm_vz_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu) |
| 3149 | { |
| 3150 | int cpu = smp_processor_id(); |
| 3151 | int r; |
| 3152 | |
James Hogan | f4474d5 | 2017-03-14 10:15:39 +0000 | 3153 | kvm_vz_acquire_htimer(vcpu); |
James Hogan | c992a4f | 2017-03-14 10:15:31 +0000 | 3154 | /* Check if we have any exceptions/interrupts pending */ |
| 3155 | kvm_mips_deliver_interrupts(vcpu, read_gc0_cause()); |
| 3156 | |
| 3157 | kvm_vz_check_requests(vcpu, cpu); |
| 3158 | kvm_vz_vcpu_load_tlb(vcpu, cpu); |
| 3159 | kvm_vz_vcpu_load_wired(vcpu); |
| 3160 | |
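| | /* enter the guest via the entry trampoline assembled at VCPU creation */ |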
| 3161 | r = vcpu->arch.vcpu_run(run, vcpu); |
| 3162 | |
| 3163 | kvm_vz_vcpu_save_wired(vcpu); |
| 3164 | |
| 3165 | return r; |
| 3166 | } |
| 3167 | |
| 3168 | static struct kvm_mips_callbacks kvm_vz_callbacks = { |
| 3169 | .handle_cop_unusable = kvm_trap_vz_handle_cop_unusable, |
| 3170 | .handle_tlb_mod = kvm_trap_vz_handle_tlb_st_miss, |
| 3171 | .handle_tlb_ld_miss = kvm_trap_vz_handle_tlb_ld_miss, |
| 3172 | .handle_tlb_st_miss = kvm_trap_vz_handle_tlb_st_miss, |
| 3173 | .handle_addr_err_st = kvm_trap_vz_no_handler, |
| 3174 | .handle_addr_err_ld = kvm_trap_vz_no_handler, |
| 3175 | .handle_syscall = kvm_trap_vz_no_handler, |
| 3176 | .handle_res_inst = kvm_trap_vz_no_handler, |
| 3177 | .handle_break = kvm_trap_vz_no_handler, |
| 3178 | .handle_msa_disabled = kvm_trap_vz_handle_msa_disabled, |
| 3179 | .handle_guest_exit = kvm_trap_vz_handle_guest_exit, |
| 3180 | |
| 3181 | .hardware_enable = kvm_vz_hardware_enable, |
| 3182 | .hardware_disable = kvm_vz_hardware_disable, |
| 3183 | .check_extension = kvm_vz_check_extension, |
| 3184 | .vcpu_init = kvm_vz_vcpu_init, |
| 3185 | .vcpu_uninit = kvm_vz_vcpu_uninit, |
| 3186 | .vcpu_setup = kvm_vz_vcpu_setup, |
| 3187 | .flush_shadow_all = kvm_vz_flush_shadow_all, |
| 3188 | .flush_shadow_memslot = kvm_vz_flush_shadow_memslot, |
| 3189 | .gva_to_gpa = kvm_vz_gva_to_gpa_cb, |
| 3190 | .queue_timer_int = kvm_vz_queue_timer_int_cb, |
| 3191 | .dequeue_timer_int = kvm_vz_dequeue_timer_int_cb, |
| 3192 | .queue_io_int = kvm_vz_queue_io_int_cb, |
| 3193 | .dequeue_io_int = kvm_vz_dequeue_io_int_cb, |
| 3194 | .irq_deliver = kvm_vz_irq_deliver_cb, |
| 3195 | .irq_clear = kvm_vz_irq_clear_cb, |
| 3196 | .num_regs = kvm_vz_num_regs, |
| 3197 | .copy_reg_indices = kvm_vz_copy_reg_indices, |
| 3198 | .get_one_reg = kvm_vz_get_one_reg, |
| 3199 | .set_one_reg = kvm_vz_set_one_reg, |
| 3200 | .vcpu_load = kvm_vz_vcpu_load, |
| 3201 | .vcpu_put = kvm_vz_vcpu_put, |
| 3202 | .vcpu_run = kvm_vz_vcpu_run, |
| 3203 | .vcpu_reenter = kvm_vz_vcpu_reenter, |
| 3204 | }; |
| 3205 | |
| 3206 | int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks) |
| 3207 | { |
| 3208 | if (!cpu_has_vz) |
| 3209 | return -ENODEV; |
| 3210 | |
| 3211 | /* |
| 3212 | * VZ requires at least 2 KScratch registers, so it should have been |
| 3213 | * possible to allocate pgd_reg. |
| 3214 | */ |
| 3215 | if (WARN(pgd_reg == -1, |
| 3216 | "pgd_reg not allocated even though cpu_has_vz\n")) |
| 3217 | return -ENODEV; |
| 3218 | |
| 3219 | pr_info("Starting KVM with MIPS VZ extensions\n"); |
| 3220 | |
| 3221 | *install_callbacks = &kvm_vz_callbacks; |
| 3222 | return 0; |
| 3223 | } |