blob: 50b812bfe083214f1ac2c67b316f808c61668657 [file] [log] [blame]
/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS: Deliver/Emulate exceptions to the guest kernel
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */
Sanjay Lalf5c236d2012-11-21 18:34:09 -080011
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <linux/vmalloc.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

#include "interrupt.h"
Sanjay Lalf5c236d2012-11-21 18:34:09 -080021
22static gpa_t kvm_trap_emul_gva_to_gpa_cb(gva_t gva)
23{
24 gpa_t gpa;
James Hogan8cffd192016-06-09 14:19:08 +010025 gva_t kseg = KSEGX(gva);
James Hoganb8f79dd2015-05-11 23:31:45 +010026 gva_t gkseg = KVM_GUEST_KSEGX(gva);
Sanjay Lalf5c236d2012-11-21 18:34:09 -080027
28 if ((kseg == CKSEG0) || (kseg == CKSEG1))
29 gpa = CPHYSADDR(gva);
James Hoganb8f79dd2015-05-11 23:31:45 +010030 else if (gkseg == KVM_GUEST_KSEG0)
31 gpa = KVM_GUEST_CPHYSADDR(gva);
Sanjay Lalf5c236d2012-11-21 18:34:09 -080032 else {
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -070033 kvm_err("%s: cannot find GPA for GVA: %#lx\n", __func__, gva);
Sanjay Lalf5c236d2012-11-21 18:34:09 -080034 kvm_mips_dump_host_tlbs();
35 gpa = KVM_INVALID_ADDR;
36 }
37
Sanjay Lalf5c236d2012-11-21 18:34:09 -080038 kvm_debug("%s: gva %#lx, gpa: %#llx\n", __func__, gva, gpa);
Sanjay Lalf5c236d2012-11-21 18:34:09 -080039
40 return gpa;
41}
42
Sanjay Lalf5c236d2012-11-21 18:34:09 -080043static int kvm_trap_emul_handle_cop_unusable(struct kvm_vcpu *vcpu)
44{
James Hogan1c0cd662015-02-06 10:56:27 +000045 struct mips_coproc *cop0 = vcpu->arch.cop0;
Sanjay Lalf5c236d2012-11-21 18:34:09 -080046 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +010047 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +010048 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -080049 enum emulation_result er = EMULATE_DONE;
50 int ret = RESUME_GUEST;
51
James Hogan1c0cd662015-02-06 10:56:27 +000052 if (((cause & CAUSEF_CE) >> CAUSEB_CE) == 1) {
53 /* FPU Unusable */
54 if (!kvm_mips_guest_has_fpu(&vcpu->arch) ||
55 (kvm_read_c0_guest_status(cop0) & ST0_CU1) == 0) {
56 /*
57 * Unusable/no FPU in guest:
58 * deliver guest COP1 Unusable Exception
59 */
60 er = kvm_mips_emulate_fpu_exc(cause, opc, run, vcpu);
61 } else {
62 /* Restore FPU state */
63 kvm_own_fpu(vcpu);
64 er = EMULATE_DONE;
65 }
66 } else {
Sanjay Lalf5c236d2012-11-21 18:34:09 -080067 er = kvm_mips_emulate_inst(cause, opc, run, vcpu);
James Hogan1c0cd662015-02-06 10:56:27 +000068 }
Sanjay Lalf5c236d2012-11-21 18:34:09 -080069
70 switch (er) {
71 case EMULATE_DONE:
72 ret = RESUME_GUEST;
73 break;
74
75 case EMULATE_FAIL:
76 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
77 ret = RESUME_HOST;
78 break;
79
80 case EMULATE_WAIT:
81 run->exit_reason = KVM_EXIT_INTR;
82 ret = RESUME_HOST;
83 break;
84
James Hogan955d8dc2017-03-14 10:15:14 +000085 case EMULATE_HYPERCALL:
86 ret = kvm_mips_handle_hypcall(vcpu);
87 break;
88
Sanjay Lalf5c236d2012-11-21 18:34:09 -080089 default:
90 BUG();
91 }
92 return ret;
93}
94
James Hogan420ea092016-12-06 19:27:18 +000095static int kvm_mips_bad_load(u32 cause, u32 *opc, struct kvm_run *run,
96 struct kvm_vcpu *vcpu)
97{
98 enum emulation_result er;
99 union mips_instruction inst;
100 int err;
101
102 /* A code fetch fault doesn't count as an MMIO */
103 if (kvm_is_ifetch_fault(&vcpu->arch)) {
104 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
105 return RESUME_HOST;
106 }
107
108 /* Fetch the instruction. */
109 if (cause & CAUSEF_BD)
110 opc += 1;
111 err = kvm_get_badinstr(opc, vcpu, &inst.word);
112 if (err) {
113 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
114 return RESUME_HOST;
115 }
116
117 /* Emulate the load */
118 er = kvm_mips_emulate_load(inst, cause, run, vcpu);
119 if (er == EMULATE_FAIL) {
120 kvm_err("Emulate load from MMIO space failed\n");
121 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
122 } else {
123 run->exit_reason = KVM_EXIT_MMIO;
124 }
125 return RESUME_HOST;
126}
127
128static int kvm_mips_bad_store(u32 cause, u32 *opc, struct kvm_run *run,
129 struct kvm_vcpu *vcpu)
130{
131 enum emulation_result er;
132 union mips_instruction inst;
133 int err;
134
135 /* Fetch the instruction. */
136 if (cause & CAUSEF_BD)
137 opc += 1;
138 err = kvm_get_badinstr(opc, vcpu, &inst.word);
139 if (err) {
140 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
141 return RESUME_HOST;
142 }
143
144 /* Emulate the store */
145 er = kvm_mips_emulate_store(inst, cause, run, vcpu);
146 if (er == EMULATE_FAIL) {
147 kvm_err("Emulate store to MMIO space failed\n");
148 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
149 } else {
150 run->exit_reason = KVM_EXIT_MMIO;
151 }
152 return RESUME_HOST;
153}
154
155static int kvm_mips_bad_access(u32 cause, u32 *opc, struct kvm_run *run,
156 struct kvm_vcpu *vcpu, bool store)
157{
158 if (store)
159 return kvm_mips_bad_store(cause, opc, run, vcpu);
160 else
161 return kvm_mips_bad_load(cause, opc, run, vcpu);
162}
163
/*
 * Handle a TLB Modified exception (guest write hit a clean host mapping).
 *
 * For guest mapped segments the guest TLB decides: a missing or invalid
 * guest entry indicates stale host TLB state (internal error); a clean
 * guest entry means the exception belongs to the guest and is delivered
 * to it; a dirty guest entry means the host mapping was merely stale and
 * is refilled.  Unwritable pages and host kernel addresses fall back to
 * MMIO store emulation.
 */
static int kvm_trap_emul_handle_tlb_mod(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	struct kvm_mips_tlb *tlb;
	unsigned long entryhi;
	int index;

	if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
	    || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		/*
		 * First find the mapping in the guest TLB. If the failure to
		 * write was due to the guest TLB, it should be up to the guest
		 * to handle it.
		 */
		entryhi = (badvaddr & VPN2_MASK) |
			  (kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID);
		index = kvm_mips_guest_tlb_lookup(vcpu, entryhi);

		/*
		 * These should never happen.
		 * They would indicate stale host TLB entries.
		 */
		if (unlikely(index < 0)) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		tlb = vcpu->arch.guest_tlb + index;
		if (unlikely(!TLB_IS_VALID(*tlb, badvaddr))) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}

		/*
		 * Guest entry not dirty? That would explain the TLB modified
		 * exception. Relay that on to the guest so it can handle it.
		 */
		if (!TLB_IS_DIRTY(*tlb, badvaddr)) {
			kvm_mips_emulate_tlbmod(cause, opc, run, vcpu);
			return RESUME_GUEST;
		}

		if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu, tlb, badvaddr,
							 true))
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, true) < 0)
			/* Not writable, needs handling as MMIO */
			return kvm_mips_bad_store(cause, opc, run, vcpu);
		return RESUME_GUEST;
	} else {
		/* host kernel addresses are all handled as MMIO */
		return kvm_mips_bad_store(cause, opc, run, vcpu);
	}
}
224
/*
 * Common handler for guest TLB load/store misses.
 * @vcpu:  Virtual CPU context.
 * @store: true for a store miss, false for a load miss.
 *
 * Dispatches on the faulting address:
 *  - commpage address in guest kernel mode: map the commpage;
 *  - guest mapped segments (useg/kseg2/3): kvm_mips_handle_tlbmiss()
 *    either injects the fault into the guest or refills the shadow TLB;
 *  - guest KSEG0: handled entirely by KVM;
 *  - host CKSEG0/1 in guest kernel mode (EVA case): treated as MMIO;
 *  - anything else: internal error.
 */
static int kvm_trap_emul_handle_tlb_miss(struct kvm_vcpu *vcpu, bool store)
{
	struct kvm_run *run = vcpu->run;
	u32 __user *opc = (u32 __user *) vcpu->arch.pc;
	unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
	u32 cause = vcpu->arch.host_cp0_cause;
	enum emulation_result er = EMULATE_DONE;
	int ret = RESUME_GUEST;

	if (((badvaddr & PAGE_MASK) == KVM_GUEST_COMMPAGE_ADDR)
	    && KVM_GUEST_KERNEL_MODE(vcpu)) {
		if (kvm_mips_handle_commpage_tlb_fault(badvaddr, vcpu) < 0) {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) < KVM_GUEST_KSEG0
		   || KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG23) {
		kvm_debug("USER ADDR TLB %s fault: cause %#x, PC: %p, BadVaddr: %#lx\n",
			  store ? "ST" : "LD", cause, opc, badvaddr);

		/*
		 * User Address (UA) fault, this could happen if
		 * (1) TLB entry not present/valid in both Guest and shadow host
		 *     TLBs, in this case we pass on the fault to the guest
		 *     kernel and let it handle it.
		 * (2) TLB entry is present in the Guest TLB but not in the
		 *     shadow, in this case we inject the TLB from the Guest TLB
		 *     into the shadow host TLB
		 */

		er = kvm_mips_handle_tlbmiss(cause, opc, run, vcpu, store);
		if (er == EMULATE_DONE)
			ret = RESUME_GUEST;
		else {
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			ret = RESUME_HOST;
		}
	} else if (KVM_GUEST_KSEGX(badvaddr) == KVM_GUEST_KSEG0) {
		/*
		 * All KSEG0 faults are handled by KVM, as the guest kernel does
		 * not expect to ever get them
		 */
		if (kvm_mips_handle_kseg0_tlb_fault(badvaddr, vcpu, store) < 0)
			ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else if (KVM_GUEST_KERNEL_MODE(vcpu)
		   && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
		/*
		 * With EVA we may get a TLB exception instead of an address
		 * error when the guest performs MMIO to KSeg1 addresses.
		 */
		ret = kvm_mips_bad_access(cause, opc, run, vcpu, store);
	} else {
		kvm_err("Illegal TLB %s fault address , cause %#x, PC: %p, BadVaddr: %#lx\n",
			store ? "ST" : "LD", cause, opc, badvaddr);
		kvm_mips_dump_host_tlbs();
		kvm_arch_vcpu_dump_regs(vcpu);
		run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = RESUME_HOST;
	}
	return ret;
}
286
/* TLB store miss: delegate to the common TLB miss handler (store variant). */
static int kvm_trap_emul_handle_tlb_st_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, true);
}
291
/* TLB load miss: delegate to the common TLB miss handler (load variant). */
static int kvm_trap_emul_handle_tlb_ld_miss(struct kvm_vcpu *vcpu)
{
	return kvm_trap_emul_handle_tlb_miss(vcpu, false);
}
296
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800297static int kvm_trap_emul_handle_addr_err_st(struct kvm_vcpu *vcpu)
298{
299 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100300 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800301 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
James Hogan31cf7492016-06-09 14:19:09 +0100302 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800303 int ret = RESUME_GUEST;
304
305 if (KVM_GUEST_KERNEL_MODE(vcpu)
306 && (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1)) {
James Hogan420ea092016-12-06 19:27:18 +0000307 ret = kvm_mips_bad_store(cause, opc, run, vcpu);
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800308 } else {
James Hogan31cf7492016-06-09 14:19:09 +0100309 kvm_err("Address Error (STORE): cause %#x, PC: %p, BadVaddr: %#lx\n",
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -0700310 cause, opc, badvaddr);
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800311 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
312 ret = RESUME_HOST;
313 }
314 return ret;
315}
316
317static int kvm_trap_emul_handle_addr_err_ld(struct kvm_vcpu *vcpu)
318{
319 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100320 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800321 unsigned long badvaddr = vcpu->arch.host_cp0_badvaddr;
James Hogan31cf7492016-06-09 14:19:09 +0100322 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800323 int ret = RESUME_GUEST;
324
325 if (KSEGX(badvaddr) == CKSEG0 || KSEGX(badvaddr) == CKSEG1) {
James Hogan420ea092016-12-06 19:27:18 +0000326 ret = kvm_mips_bad_load(cause, opc, run, vcpu);
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800327 } else {
James Hogan31cf7492016-06-09 14:19:09 +0100328 kvm_err("Address Error (LOAD): cause %#x, PC: %p, BadVaddr: %#lx\n",
Deng-Cheng Zhu6ad78a52014-06-26 12:11:35 -0700329 cause, opc, badvaddr);
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800330 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
331 ret = RESUME_HOST;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800332 }
333 return ret;
334}
335
336static int kvm_trap_emul_handle_syscall(struct kvm_vcpu *vcpu)
337{
338 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100339 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100340 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800341 enum emulation_result er = EMULATE_DONE;
342 int ret = RESUME_GUEST;
343
344 er = kvm_mips_emulate_syscall(cause, opc, run, vcpu);
345 if (er == EMULATE_DONE)
346 ret = RESUME_GUEST;
347 else {
348 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
349 ret = RESUME_HOST;
350 }
351 return ret;
352}
353
354static int kvm_trap_emul_handle_res_inst(struct kvm_vcpu *vcpu)
355{
356 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100357 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100358 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800359 enum emulation_result er = EMULATE_DONE;
360 int ret = RESUME_GUEST;
361
362 er = kvm_mips_handle_ri(cause, opc, run, vcpu);
363 if (er == EMULATE_DONE)
364 ret = RESUME_GUEST;
365 else {
366 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
367 ret = RESUME_HOST;
368 }
369 return ret;
370}
371
372static int kvm_trap_emul_handle_break(struct kvm_vcpu *vcpu)
373{
374 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100375 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100376 u32 cause = vcpu->arch.host_cp0_cause;
Sanjay Lalf5c236d2012-11-21 18:34:09 -0800377 enum emulation_result er = EMULATE_DONE;
378 int ret = RESUME_GUEST;
379
380 er = kvm_mips_emulate_bp_exc(cause, opc, run, vcpu);
381 if (er == EMULATE_DONE)
382 ret = RESUME_GUEST;
383 else {
384 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
385 ret = RESUME_HOST;
386 }
387 return ret;
388}
389
James Hogan0a560422015-02-06 16:03:57 +0000390static int kvm_trap_emul_handle_trap(struct kvm_vcpu *vcpu)
391{
392 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100393 u32 __user *opc = (u32 __user *)vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100394 u32 cause = vcpu->arch.host_cp0_cause;
James Hogan0a560422015-02-06 16:03:57 +0000395 enum emulation_result er = EMULATE_DONE;
396 int ret = RESUME_GUEST;
397
398 er = kvm_mips_emulate_trap_exc(cause, opc, run, vcpu);
399 if (er == EMULATE_DONE) {
400 ret = RESUME_GUEST;
401 } else {
402 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
403 ret = RESUME_HOST;
404 }
405 return ret;
406}
407
James Hoganc2537ed2015-02-06 10:56:27 +0000408static int kvm_trap_emul_handle_msa_fpe(struct kvm_vcpu *vcpu)
409{
410 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100411 u32 __user *opc = (u32 __user *)vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100412 u32 cause = vcpu->arch.host_cp0_cause;
James Hoganc2537ed2015-02-06 10:56:27 +0000413 enum emulation_result er = EMULATE_DONE;
414 int ret = RESUME_GUEST;
415
416 er = kvm_mips_emulate_msafpe_exc(cause, opc, run, vcpu);
417 if (er == EMULATE_DONE) {
418 ret = RESUME_GUEST;
419 } else {
420 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
421 ret = RESUME_HOST;
422 }
423 return ret;
424}
425
James Hogan1c0cd662015-02-06 10:56:27 +0000426static int kvm_trap_emul_handle_fpe(struct kvm_vcpu *vcpu)
427{
428 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100429 u32 __user *opc = (u32 __user *)vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100430 u32 cause = vcpu->arch.host_cp0_cause;
James Hogan1c0cd662015-02-06 10:56:27 +0000431 enum emulation_result er = EMULATE_DONE;
432 int ret = RESUME_GUEST;
433
434 er = kvm_mips_emulate_fpe_exc(cause, opc, run, vcpu);
435 if (er == EMULATE_DONE) {
436 ret = RESUME_GUEST;
437 } else {
438 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
439 ret = RESUME_HOST;
440 }
441 return ret;
442}
443
James Hoganc2537ed2015-02-06 10:56:27 +0000444/**
445 * kvm_trap_emul_handle_msa_disabled() - Guest used MSA while disabled in root.
446 * @vcpu: Virtual CPU context.
447 *
448 * Handle when the guest attempts to use MSA when it is disabled.
449 */
James Hogan98119ad2015-02-06 11:11:56 +0000450static int kvm_trap_emul_handle_msa_disabled(struct kvm_vcpu *vcpu)
451{
James Hoganc2537ed2015-02-06 10:56:27 +0000452 struct mips_coproc *cop0 = vcpu->arch.cop0;
James Hogan98119ad2015-02-06 11:11:56 +0000453 struct kvm_run *run = vcpu->run;
James Hogan8cffd192016-06-09 14:19:08 +0100454 u32 __user *opc = (u32 __user *) vcpu->arch.pc;
James Hogan31cf7492016-06-09 14:19:09 +0100455 u32 cause = vcpu->arch.host_cp0_cause;
James Hogan98119ad2015-02-06 11:11:56 +0000456 enum emulation_result er = EMULATE_DONE;
457 int ret = RESUME_GUEST;
458
James Hoganc2537ed2015-02-06 10:56:27 +0000459 if (!kvm_mips_guest_has_msa(&vcpu->arch) ||
460 (kvm_read_c0_guest_status(cop0) & (ST0_CU1 | ST0_FR)) == ST0_CU1) {
461 /*
462 * No MSA in guest, or FPU enabled and not in FR=1 mode,
463 * guest reserved instruction exception
464 */
465 er = kvm_mips_emulate_ri_exc(cause, opc, run, vcpu);
466 } else if (!(kvm_read_c0_guest_config5(cop0) & MIPS_CONF5_MSAEN)) {
467 /* MSA disabled by guest, guest MSA disabled exception */
468 er = kvm_mips_emulate_msadis_exc(cause, opc, run, vcpu);
469 } else {
470 /* Restore MSA/FPU state */
471 kvm_own_msa(vcpu);
472 er = EMULATE_DONE;
473 }
James Hogan98119ad2015-02-06 11:11:56 +0000474
475 switch (er) {
476 case EMULATE_DONE:
477 ret = RESUME_GUEST;
478 break;
479
480 case EMULATE_FAIL:
481 run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
482 ret = RESUME_HOST;
483 break;
484
485 default:
486 BUG();
487 }
488 return ret;
489}
490
/* Trap & emulate needs no hardware virtualization setup; always succeeds. */
static int kvm_trap_emul_hardware_enable(void)
{
	return 0;
}
495
/* Nothing to tear down: no hardware state was enabled. */
static void kvm_trap_emul_hardware_disable(void)
{
}
499
James Hogan607ef2f2017-03-14 10:15:22 +0000500static int kvm_trap_emul_check_extension(struct kvm *kvm, long ext)
501{
502 int r;
503
504 switch (ext) {
505 case KVM_CAP_MIPS_TE:
506 r = 1;
507 break;
508 default:
509 r = 0;
510 break;
511 }
512
513 return r;
514}
515
/*
 * Per-VCPU initialisation: allocate the two GVA -> HPA page tables
 * (guest kernel mode and guest user mode).
 * Returns 0 on success or -ENOMEM, unwinding the first allocation if
 * the second one fails.
 */
static int kvm_trap_emul_vcpu_init(struct kvm_vcpu *vcpu)
{
	struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
	struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;

	/*
	 * Allocate GVA -> HPA page tables.
	 * MIPS doesn't use the mm_struct pointer argument.
	 */
	kern_mm->pgd = pgd_alloc(kern_mm);
	if (!kern_mm->pgd)
		return -ENOMEM;

	user_mm->pgd = pgd_alloc(user_mm);
	if (!user_mm->pgd) {
		/* Unwind the kernel-mode PGD so nothing leaks. */
		pgd_free(kern_mm, kern_mm->pgd);
		return -ENOMEM;
	}

	return 0;
}
537
/*
 * Free a GVA page table allocated by kvm_trap_emul_vcpu_init().
 *
 * Walks only the user half of @pgd (virtual addresses below 0x80000000)
 * and frees every PTE, PMD and PUD level page before freeing the PGD
 * page itself.  Entries covering 0x80000000 and above are host kernel
 * page tables copied from init_mm.pgd and must not be freed, hence the
 * early break at each level once the covered VA reaches `end`.
 */
static void kvm_mips_emul_free_gva_pt(pgd_t *pgd)
{
	/* Don't free host kernel page tables copied from init_mm.pgd */
	const unsigned long end = 0x80000000;
	unsigned long pgd_va, pud_va, pmd_va;
	pud_t *pud;
	pmd_t *pmd;
	pte_t *pte;
	int i, j, k;

	for (i = 0; i < USER_PTRS_PER_PGD; i++) {
		if (pgd_none(pgd[i]))
			continue;

		pgd_va = (unsigned long)i << PGDIR_SHIFT;
		if (pgd_va >= end)
			break;
		pud = pud_offset(pgd + i, 0);
		for (j = 0; j < PTRS_PER_PUD; j++) {
			if (pud_none(pud[j]))
				continue;

			pud_va = pgd_va | ((unsigned long)j << PUD_SHIFT);
			if (pud_va >= end)
				break;
			pmd = pmd_offset(pud + j, 0);
			for (k = 0; k < PTRS_PER_PMD; k++) {
				if (pmd_none(pmd[k]))
					continue;

				pmd_va = pud_va | (k << PMD_SHIFT);
				if (pmd_va >= end)
					break;
				pte = pte_offset(pmd + k, 0);
				pte_free_kernel(NULL, pte);
			}
			pmd_free(NULL, pmd);
		}
		pud_free(NULL, pud);
	}
	pgd_free(NULL, pgd);
}
580
James Hogan630766b2016-09-08 23:00:24 +0100581static void kvm_trap_emul_vcpu_uninit(struct kvm_vcpu *vcpu)
582{
James Hoganf7f14272016-09-08 22:57:03 +0100583 kvm_mips_emul_free_gva_pt(vcpu->arch.guest_kernel_mm.pgd);
584 kvm_mips_emul_free_gva_pt(vcpu->arch.guest_user_mm.pgd);
James Hogan630766b2016-09-08 23:00:24 +0100585}
586
/*
 * Architecture-specific VCPU reset state: start the emulated timer and
 * program the guest's CP0 PRId/Config*/Status/IntCtl/EBase registers so
 * the guest sees a plausible CPU, then place the PC at the reset vector.
 * Always returns 0.
 */
static int kvm_trap_emul_vcpu_setup(struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	u32 config, config1;
	int vcpu_id = vcpu->vcpu_id;

	/* Start off the timer at 100 MHz */
	kvm_mips_init_count(vcpu, 100*1000*1000);

	/*
	 * Arch specific stuff, set up config registers properly so that the
	 * guest will come up as expected
	 */
#ifndef CONFIG_CPU_MIPSR6
	/* r2-r5, simulate a MIPS 24kc */
	kvm_write_c0_guest_prid(cop0, 0x00019300);
#else
	/* r6+, simulate a generic QEMU machine */
	kvm_write_c0_guest_prid(cop0, 0x00010000);
#endif
	/*
	 * Have config1, Cacheable, noncoherent, write-back, write allocate.
	 * Endianness, arch revision & virtually tagged icache should match
	 * host.
	 */
	config = read_c0_config() & MIPS_CONF_AR;
	config |= MIPS_CONF_M | CONF_CM_CACHABLE_NONCOHERENT | MIPS_CONF_MT_TLB;
#ifdef CONFIG_CPU_BIG_ENDIAN
	config |= CONF_BE;
#endif
	if (cpu_has_vtag_icache)
		config |= MIPS_CONF_VI;
	kvm_write_c0_guest_config(cop0, config);

	/* Read the cache characteristics from the host Config1 Register */
	config1 = (read_c0_config1() & ~0x7f);

	/* Set up MMU size (bits 30:25 = number of TLB entries - 1) */
	config1 &= ~(0x3f << 25);
	config1 |= ((KVM_MIPS_GUEST_TLB_SIZE - 1) << 25);

	/* We unset some bits that we aren't emulating */
	config1 &= ~(MIPS_CONF1_C2 | MIPS_CONF1_MD | MIPS_CONF1_PC |
		     MIPS_CONF1_WR | MIPS_CONF1_CA);
	kvm_write_c0_guest_config1(cop0, config1);

	/* Have config3, no tertiary/secondary caches implemented */
	kvm_write_c0_guest_config2(cop0, MIPS_CONF_M);
	/* MIPS_CONF_M | (read_c0_config2() & 0xfff) */

	/* Have config4, UserLocal */
	kvm_write_c0_guest_config3(cop0, MIPS_CONF_M | MIPS_CONF3_ULRI);

	/* Have config5 */
	kvm_write_c0_guest_config4(cop0, MIPS_CONF_M);

	/* No config6 */
	kvm_write_c0_guest_config5(cop0, 0);

	/* Set Wait IE/IXMT Ignore in Config7, IAR, AR */
	kvm_write_c0_guest_config7(cop0, (MIPS_CONF7_WII) | (1 << 10));

	/* Status: BEV (boot exception vectors) and ERL, as after real reset */
	kvm_write_c0_guest_status(cop0, ST0_BEV | ST0_ERL);

	/*
	 * Setup IntCtl defaults, compatibility mode for timer interrupts (HW5)
	 */
	kvm_write_c0_guest_intctl(cop0, 0xFC000000);

	/* Put in vcpu id as CPUNum into Ebase Reg to handle SMP Guests */
	kvm_write_c0_guest_ebase(cop0, KVM_GUEST_KSEG0 |
				       (vcpu_id & MIPS_EBASE_CPUNUM));

	/* Put PC at guest reset vector */
	vcpu->arch.pc = KVM_GUEST_CKSEG1ADDR(0x1fc00000);

	return 0;
}
666
/* Flush GVA page tables and invalidate GVA ASIDs on all VCPUs. */
static void kvm_trap_emul_flush_shadow_all(struct kvm *kvm)
{
	kvm_flush_remote_tlbs(kvm);
}
672
/*
 * Flush the shadow state for one memslot.  No memslot-granular state is
 * kept here, so just flush everything.
 */
static void kvm_trap_emul_flush_shadow_memslot(struct kvm *kvm,
					       const struct kvm_memory_slot *slot)
{
	kvm_trap_emul_flush_shadow_all(kvm);
}
678
/*
 * Register IDs exposed to userspace (KVM_GET_REG_LIST /
 * KVM_GET_ONE_REG / KVM_SET_ONE_REG) by the trap & emulate
 * implementation: guest CP0 registers plus the virtual count/timer
 * controls.
 */
static u64 kvm_trap_emul_get_one_regs[] = {
	KVM_REG_MIPS_CP0_INDEX,
	KVM_REG_MIPS_CP0_ENTRYLO0,
	KVM_REG_MIPS_CP0_ENTRYLO1,
	KVM_REG_MIPS_CP0_CONTEXT,
	KVM_REG_MIPS_CP0_USERLOCAL,
	KVM_REG_MIPS_CP0_PAGEMASK,
	KVM_REG_MIPS_CP0_WIRED,
	KVM_REG_MIPS_CP0_HWRENA,
	KVM_REG_MIPS_CP0_BADVADDR,
	KVM_REG_MIPS_CP0_COUNT,
	KVM_REG_MIPS_CP0_ENTRYHI,
	KVM_REG_MIPS_CP0_COMPARE,
	KVM_REG_MIPS_CP0_STATUS,
	KVM_REG_MIPS_CP0_INTCTL,
	KVM_REG_MIPS_CP0_CAUSE,
	KVM_REG_MIPS_CP0_EPC,
	KVM_REG_MIPS_CP0_PRID,
	KVM_REG_MIPS_CP0_EBASE,
	KVM_REG_MIPS_CP0_CONFIG,
	KVM_REG_MIPS_CP0_CONFIG1,
	KVM_REG_MIPS_CP0_CONFIG2,
	KVM_REG_MIPS_CP0_CONFIG3,
	KVM_REG_MIPS_CP0_CONFIG4,
	KVM_REG_MIPS_CP0_CONFIG5,
	KVM_REG_MIPS_CP0_CONFIG7,
	KVM_REG_MIPS_CP0_ERROREPC,
	KVM_REG_MIPS_CP0_KSCRATCH1,
	KVM_REG_MIPS_CP0_KSCRATCH2,
	KVM_REG_MIPS_CP0_KSCRATCH3,
	KVM_REG_MIPS_CP0_KSCRATCH4,
	KVM_REG_MIPS_CP0_KSCRATCH5,
	KVM_REG_MIPS_CP0_KSCRATCH6,

	/* Virtual timer/count controls, not CP0 registers */
	KVM_REG_MIPS_COUNT_CTL,
	KVM_REG_MIPS_COUNT_RESUME,
	KVM_REG_MIPS_COUNT_HZ,
};
717
/* Number of register IDs reported to userspace via KVM_GET_REG_LIST. */
static unsigned long kvm_trap_emul_num_regs(struct kvm_vcpu *vcpu)
{
	return ARRAY_SIZE(kvm_trap_emul_get_one_regs);
}
722
/*
 * Copy the supported register ID table out to the userspace buffer.
 * Returns 0 on success or -EFAULT if the user buffer is unwritable.
 */
static int kvm_trap_emul_copy_reg_indices(struct kvm_vcpu *vcpu,
					  u64 __user *indices)
{
	if (copy_to_user(indices, kvm_trap_emul_get_one_regs,
			 sizeof(kvm_trap_emul_get_one_regs)))
		return -EFAULT;
	/* advance past the entries just written (kept for callers appending more) */
	indices += ARRAY_SIZE(kvm_trap_emul_get_one_regs);

	return 0;
}
733
/*
 * KVM_GET_ONE_REG backend: read one guest register identified by
 * reg->id into *v.  CP0 registers come from the shadow cop0 context,
 * Count goes through the virtual timer, and the COUNT_* IDs expose the
 * timer emulation controls.  Returns 0 on success or -EINVAL for an
 * unknown register ID.
 */
static int kvm_trap_emul_get_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 *v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		*v = (long)kvm_read_c0_guest_index(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		*v = kvm_read_c0_guest_entrylo0(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		*v = kvm_read_c0_guest_entrylo1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		*v = (long)kvm_read_c0_guest_context(cop0);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		*v = (long)kvm_read_c0_guest_userlocal(cop0);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		*v = (long)kvm_read_c0_guest_pagemask(cop0);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		*v = (long)kvm_read_c0_guest_wired(cop0);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		*v = (long)kvm_read_c0_guest_hwrena(cop0);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		*v = (long)kvm_read_c0_guest_badvaddr(cop0);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		*v = (long)kvm_read_c0_guest_entryhi(cop0);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		*v = (long)kvm_read_c0_guest_compare(cop0);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		*v = (long)kvm_read_c0_guest_status(cop0);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		*v = (long)kvm_read_c0_guest_intctl(cop0);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		*v = (long)kvm_read_c0_guest_cause(cop0);
		break;
	case KVM_REG_MIPS_CP0_EPC:
		*v = (long)kvm_read_c0_guest_epc(cop0);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		*v = (long)kvm_read_c0_guest_prid(cop0);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		*v = (long)kvm_read_c0_guest_ebase(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		*v = (long)kvm_read_c0_guest_config(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		*v = (long)kvm_read_c0_guest_config1(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		*v = (long)kvm_read_c0_guest_config2(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		*v = (long)kvm_read_c0_guest_config3(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		*v = (long)kvm_read_c0_guest_config4(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		*v = (long)kvm_read_c0_guest_config5(cop0);
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		*v = (long)kvm_read_c0_guest_config7(cop0);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		/* Count is virtualised; read it via the timer emulation. */
		*v = kvm_mips_read_count(vcpu);
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		*v = vcpu->arch.count_ctl;
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		*v = ktime_to_ns(vcpu->arch.count_resume);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		*v = vcpu->arch.count_hz;
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		*v = (long)kvm_read_c0_guest_errorepc(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		*v = (long)kvm_read_c0_guest_kscratch1(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		*v = (long)kvm_read_c0_guest_kscratch2(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		*v = (long)kvm_read_c0_guest_kscratch3(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		*v = (long)kvm_read_c0_guest_kscratch4(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		*v = (long)kvm_read_c0_guest_kscratch5(cop0);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		*v = (long)kvm_read_c0_guest_kscratch6(cop0);
		break;
	default:
		return -EINVAL;
	}
	return 0;
}
851
/*
 * kvm_trap_emul_set_one_reg() - Write one guest register (KVM_SET_ONE_REG).
 * @vcpu:	VCPU whose guest register state is being written.
 * @reg:	Register descriptor from userspace; reg->id selects the register.
 * @v:		Value to write.
 *
 * Most CP0 registers are written straight into the guest's shadow CP0 state.
 * Count/Compare/Cause and the KVM_REG_MIPS_COUNT_* registers go through the
 * count/timer helpers so the emulated timer state stays consistent, and the
 * Config registers are masked so only writable bits can change.
 *
 * Returns:	0 on success, -EINVAL if reg->id is not recognised, or a
 *		negative error code from the count/timer helpers.
 */
static int kvm_trap_emul_set_one_reg(struct kvm_vcpu *vcpu,
				     const struct kvm_one_reg *reg,
				     s64 v)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	int ret = 0;
	unsigned int cur, change;

	switch (reg->id) {
	case KVM_REG_MIPS_CP0_INDEX:
		kvm_write_c0_guest_index(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO0:
		kvm_write_c0_guest_entrylo0(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYLO1:
		kvm_write_c0_guest_entrylo1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_CONTEXT:
		kvm_write_c0_guest_context(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_USERLOCAL:
		kvm_write_c0_guest_userlocal(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PAGEMASK:
		kvm_write_c0_guest_pagemask(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_WIRED:
		kvm_write_c0_guest_wired(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_HWRENA:
		kvm_write_c0_guest_hwrena(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_BADVADDR:
		kvm_write_c0_guest_badvaddr(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_ENTRYHI:
		kvm_write_c0_guest_entryhi(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_STATUS:
		kvm_write_c0_guest_status(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_INTCTL:
		/* No VInt, so no VS, read-only for now */
		break;
	case KVM_REG_MIPS_CP0_EPC:
		kvm_write_c0_guest_epc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_PRID:
		kvm_write_c0_guest_prid(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_EBASE:
		/*
		 * Allow core number to be written, but the exception base must
		 * remain in guest KSeg0.
		 */
		kvm_change_c0_guest_ebase(cop0, 0x1ffff000 | MIPS_EBASE_CPUNUM,
					  v);
		break;
	case KVM_REG_MIPS_CP0_COUNT:
		kvm_mips_write_count(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_COMPARE:
		kvm_mips_write_compare(vcpu, v, false);
		break;
	case KVM_REG_MIPS_CP0_CAUSE:
		/*
		 * If the timer is stopped or started (DC bit) it must look
		 * atomic with changes to the interrupt pending bits (TI, IRQ5).
		 * A timer interrupt should not happen in between.
		 */
		if ((kvm_read_c0_guest_cause(cop0) ^ v) & CAUSEF_DC) {
			if (v & CAUSEF_DC) {
				/* disable timer first */
				kvm_mips_count_disable_cause(vcpu);
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
			} else {
				/* enable timer last */
				kvm_change_c0_guest_cause(cop0, ~CAUSEF_DC, v);
				kvm_mips_count_enable_cause(vcpu);
			}
		} else {
			kvm_write_c0_guest_cause(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG1:
		/* Apply only the bits the writability mask permits to change */
		cur = kvm_read_c0_guest_config1(cop0);
		change = (cur ^ v) & kvm_mips_config1_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config1(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG2:
		/* read-only for now */
		break;
	case KVM_REG_MIPS_CP0_CONFIG3:
		cur = kvm_read_c0_guest_config3(cop0);
		change = (cur ^ v) & kvm_mips_config3_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config3(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG4:
		cur = kvm_read_c0_guest_config4(cop0);
		change = (cur ^ v) & kvm_mips_config4_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config4(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG5:
		cur = kvm_read_c0_guest_config5(cop0);
		change = (cur ^ v) & kvm_mips_config5_wrmask(vcpu);
		if (change) {
			v = cur ^ change;
			kvm_write_c0_guest_config5(cop0, v);
		}
		break;
	case KVM_REG_MIPS_CP0_CONFIG7:
		/* writes ignored */
		break;
	case KVM_REG_MIPS_COUNT_CTL:
		ret = kvm_mips_set_count_ctl(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_RESUME:
		ret = kvm_mips_set_count_resume(vcpu, v);
		break;
	case KVM_REG_MIPS_COUNT_HZ:
		ret = kvm_mips_set_count_hz(vcpu, v);
		break;
	case KVM_REG_MIPS_CP0_ERROREPC:
		kvm_write_c0_guest_errorepc(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH1:
		kvm_write_c0_guest_kscratch1(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH2:
		kvm_write_c0_guest_kscratch2(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH3:
		kvm_write_c0_guest_kscratch3(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH4:
		kvm_write_c0_guest_kscratch4(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH5:
		kvm_write_c0_guest_kscratch5(cop0, v);
		break;
	case KVM_REG_MIPS_CP0_KSCRATCH6:
		kvm_write_c0_guest_kscratch6(cop0, v);
		break;
	default:
		return -EINVAL;
	}
	return ret;
}
1013
James Hogana60b8432016-11-12 00:00:13 +00001014static int kvm_trap_emul_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
James Hoganb86ecb32015-02-09 16:35:20 +00001015{
James Hoganc550d532016-10-11 23:14:39 +01001016 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
1017 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
James Hogan7faa6ee2016-10-07 23:58:53 +01001018 struct mm_struct *mm;
James Hogan1581ff32016-11-16 23:48:56 +00001019
James Hogan1581ff32016-11-16 23:48:56 +00001020 /*
James Hogan91737ea2016-12-02 23:40:52 +00001021 * Were we in guest context? If so, restore the appropriate ASID based
1022 * on the mode of the Guest (Kernel/User).
James Hogan1581ff32016-11-16 23:48:56 +00001023 */
1024 if (current->flags & PF_VCPU) {
James Hogan7faa6ee2016-10-07 23:58:53 +01001025 mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
James Hogan91737ea2016-12-02 23:40:52 +00001026 if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
1027 asid_version_mask(cpu))
1028 get_new_mmu_context(mm, cpu);
James Hogan7faa6ee2016-10-07 23:58:53 +01001029 write_c0_entryhi(cpu_asid(cpu, mm));
1030 TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
James Hogana7ebb2e2016-11-15 00:06:05 +00001031 kvm_mips_suspend_mm(cpu);
James Hogan1581ff32016-11-16 23:48:56 +00001032 ehb();
1033 }
1034
James Hoganb86ecb32015-02-09 16:35:20 +00001035 return 0;
1036}
1037
James Hogana60b8432016-11-12 00:00:13 +00001038static int kvm_trap_emul_vcpu_put(struct kvm_vcpu *vcpu, int cpu)
James Hoganb86ecb32015-02-09 16:35:20 +00001039{
James Hogana60b8432016-11-12 00:00:13 +00001040 kvm_lose_fpu(vcpu);
1041
James Hogan91cdee52016-11-18 13:25:24 +00001042 if (current->flags & PF_VCPU) {
1043 /* Restore normal Linux process memory map */
1044 if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
James Hogan91737ea2016-12-02 23:40:52 +00001045 asid_version_mask(cpu)))
James Hogan91cdee52016-11-18 13:25:24 +00001046 get_new_mmu_context(current->mm, cpu);
James Hogan91cdee52016-11-18 13:25:24 +00001047 write_c0_entryhi(cpu_asid(cpu, current->mm));
James Hogan7faa6ee2016-10-07 23:58:53 +01001048 TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
James Hogana7ebb2e2016-11-15 00:06:05 +00001049 kvm_mips_resume_mm(cpu);
James Hogan91cdee52016-11-18 13:25:24 +00001050 ehb();
James Hogan1581ff32016-11-16 23:48:56 +00001051 }
James Hogan1581ff32016-11-16 23:48:56 +00001052
James Hoganb86ecb32015-02-09 16:35:20 +00001053 return 0;
1054}
1055
James Hoganb29e1152016-11-28 23:19:32 +00001056static void kvm_trap_emul_check_requests(struct kvm_vcpu *vcpu, int cpu,
1057 bool reload_asid)
1058{
1059 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
1060 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
1061 struct mm_struct *mm;
1062 int i;
1063
1064 if (likely(!vcpu->requests))
1065 return;
1066
1067 if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu)) {
1068 /*
1069 * Both kernel & user GVA mappings must be invalidated. The
1070 * caller is just about to check whether the ASID is stale
1071 * anyway so no need to reload it here.
1072 */
1073 kvm_mips_flush_gva_pt(kern_mm->pgd, KMF_GPA | KMF_KERN);
1074 kvm_mips_flush_gva_pt(user_mm->pgd, KMF_GPA | KMF_USER);
1075 for_each_possible_cpu(i) {
1076 cpu_context(i, kern_mm) = 0;
1077 cpu_context(i, user_mm) = 0;
1078 }
1079
1080 /* Generate new ASID for current mode */
1081 if (reload_asid) {
1082 mm = KVM_GUEST_KERNEL_MODE(vcpu) ? kern_mm : user_mm;
1083 get_new_mmu_context(mm, cpu);
1084 htw_stop();
1085 write_c0_entryhi(cpu_asid(cpu, mm));
1086 TLBMISS_HANDLER_SETUP_PGD(mm->pgd);
1087 htw_start();
1088 }
1089 }
1090}
1091
James Hogan1880afd2016-11-28 23:04:52 +00001092/**
1093 * kvm_trap_emul_gva_lockless_begin() - Begin lockless access to GVA space.
1094 * @vcpu: VCPU pointer.
1095 *
1096 * Call before a GVA space access outside of guest mode, to ensure that
1097 * asynchronous TLB flush requests are handled or delayed until completion of
1098 * the GVA access (as indicated by a matching kvm_trap_emul_gva_lockless_end()).
1099 *
1100 * Should be called with IRQs already enabled.
1101 */
1102void kvm_trap_emul_gva_lockless_begin(struct kvm_vcpu *vcpu)
1103{
1104 /* We re-enable IRQs in kvm_trap_emul_gva_lockless_end() */
1105 WARN_ON_ONCE(irqs_disabled());
1106
1107 /*
1108 * The caller is about to access the GVA space, so we set the mode to
1109 * force TLB flush requests to send an IPI, and also disable IRQs to
1110 * delay IPI handling until kvm_trap_emul_gva_lockless_end().
1111 */
1112 local_irq_disable();
1113
1114 /*
1115 * Make sure the read of VCPU requests is not reordered ahead of the
1116 * write to vcpu->mode, or we could miss a TLB flush request while
1117 * the requester sees the VCPU as outside of guest mode and not needing
1118 * an IPI.
1119 */
1120 smp_store_mb(vcpu->mode, READING_SHADOW_PAGE_TABLES);
1121
1122 /*
1123 * If a TLB flush has been requested (potentially while
1124 * OUTSIDE_GUEST_MODE and assumed immediately effective), perform it
1125 * before accessing the GVA space, and be sure to reload the ASID if
1126 * necessary as it'll be immediately used.
1127 *
1128 * TLB flush requests after this check will trigger an IPI due to the
1129 * mode change above, which will be delayed due to IRQs disabled.
1130 */
1131 kvm_trap_emul_check_requests(vcpu, smp_processor_id(), true);
1132}
1133
1134/**
1135 * kvm_trap_emul_gva_lockless_end() - End lockless access to GVA space.
1136 * @vcpu: VCPU pointer.
1137 *
1138 * Called after a GVA space access outside of guest mode. Should have a matching
1139 * call to kvm_trap_emul_gva_lockless_begin().
1140 */
1141void kvm_trap_emul_gva_lockless_end(struct kvm_vcpu *vcpu)
1142{
1143 /*
1144 * Make sure the write to vcpu->mode is not reordered in front of GVA
1145 * accesses, or a TLB flush requester may not think it necessary to send
1146 * an IPI.
1147 */
1148 smp_store_release(&vcpu->mode, OUTSIDE_GUEST_MODE);
1149
1150 /*
1151 * Now that the access to GVA space is complete, its safe for pending
1152 * TLB flush request IPIs to be handled (which indicates completion).
1153 */
1154 local_irq_enable();
1155}
1156
James Hogana2c046e2016-11-18 13:14:37 +00001157static void kvm_trap_emul_vcpu_reenter(struct kvm_run *run,
1158 struct kvm_vcpu *vcpu)
1159{
James Hoganb29e1152016-11-28 23:19:32 +00001160 struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm;
James Hogana2c046e2016-11-18 13:14:37 +00001161 struct mm_struct *user_mm = &vcpu->arch.guest_user_mm;
James Hoganb29e1152016-11-28 23:19:32 +00001162 struct mm_struct *mm;
James Hogana2c046e2016-11-18 13:14:37 +00001163 struct mips_coproc *cop0 = vcpu->arch.cop0;
1164 int i, cpu = smp_processor_id();
1165 unsigned int gasid;
1166
1167 /*
James Hoganb29e1152016-11-28 23:19:32 +00001168 * No need to reload ASID, IRQs are disabled already so there's no rush,
1169 * and we'll check if we need to regenerate below anyway before
1170 * re-entering the guest.
James Hogana2c046e2016-11-18 13:14:37 +00001171 */
James Hoganb29e1152016-11-28 23:19:32 +00001172 kvm_trap_emul_check_requests(vcpu, cpu, false);
1173
1174 if (KVM_GUEST_KERNEL_MODE(vcpu)) {
1175 mm = kern_mm;
1176 } else {
1177 mm = user_mm;
1178
1179 /*
1180 * Lazy host ASID regeneration / PT flush for guest user mode.
1181 * If the guest ASID has changed since the last guest usermode
1182 * execution, invalidate the stale TLB entries and flush GVA PT
1183 * entries too.
1184 */
James Hogana2c046e2016-11-18 13:14:37 +00001185 gasid = kvm_read_c0_guest_entryhi(cop0) & KVM_ENTRYHI_ASID;
1186 if (gasid != vcpu->arch.last_user_gasid) {
James Hogana31b50d2016-12-16 15:57:00 +00001187 kvm_mips_flush_gva_pt(user_mm->pgd, KMF_USER);
James Hogana2c046e2016-11-18 13:14:37 +00001188 for_each_possible_cpu(i)
James Hoganb29e1152016-11-28 23:19:32 +00001189 cpu_context(i, user_mm) = 0;
James Hogana2c046e2016-11-18 13:14:37 +00001190 vcpu->arch.last_user_gasid = gasid;
1191 }
1192 }
James Hoganb29e1152016-11-28 23:19:32 +00001193
1194 /*
1195 * Check if ASID is stale. This may happen due to a TLB flush request or
1196 * a lazy user MM invalidation.
1197 */
1198 if ((cpu_context(cpu, mm) ^ asid_cache(cpu)) &
1199 asid_version_mask(cpu))
1200 get_new_mmu_context(mm, cpu);
James Hogana2c046e2016-11-18 13:14:37 +00001201}
1202
/*
 * kvm_trap_emul_vcpu_run() - Run the guest until it exits to the host.
 * @run:	The VCPU run structure.
 * @vcpu:	VCPU to run.
 *
 * Delivers pending interrupts, prepares the guest address space, then enters
 * the guest via vcpu->arch.vcpu_run. On return, restores the normal Linux
 * process address space (we may also have migrated CPUs while handling guest
 * exits).
 *
 * Returns:	the value returned by vcpu->arch.vcpu_run.
 */
static int kvm_trap_emul_vcpu_run(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	int cpu = smp_processor_id();
	int r;

	/* Check if we have any exceptions/interrupts pending */
	kvm_mips_deliver_interrupts(vcpu,
				    kvm_read_c0_guest_cause(vcpu->arch.cop0));

	kvm_trap_emul_vcpu_reenter(run, vcpu);

	/*
	 * We use user accessors to access guest memory, but we don't want to
	 * invoke Linux page faulting.
	 */
	pagefault_disable();

	/* Disable hardware page table walking while in guest */
	htw_stop();

	/*
	 * While in guest context we're in the guest's address space, not the
	 * host process address space, so we need to be careful not to confuse
	 * e.g. cache management IPIs.
	 */
	kvm_mips_suspend_mm(cpu);

	r = vcpu->arch.vcpu_run(run, vcpu);

	/* We may have migrated while handling guest exits */
	cpu = smp_processor_id();

	/* Restore normal Linux process memory map */
	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     asid_version_mask(cpu)))
		get_new_mmu_context(current->mm, cpu);
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	TLBMISS_HANDLER_SETUP_PGD(current->mm->pgd);
	kvm_mips_resume_mm(cpu);

	/* Re-enable hardware page table walking disabled above */
	htw_start();

	pagefault_enable();

	return r;
}
1249
Sanjay Lalf5c236d2012-11-21 18:34:09 -08001250static struct kvm_mips_callbacks kvm_trap_emul_callbacks = {
1251 /* exit handlers */
1252 .handle_cop_unusable = kvm_trap_emul_handle_cop_unusable,
1253 .handle_tlb_mod = kvm_trap_emul_handle_tlb_mod,
1254 .handle_tlb_st_miss = kvm_trap_emul_handle_tlb_st_miss,
1255 .handle_tlb_ld_miss = kvm_trap_emul_handle_tlb_ld_miss,
1256 .handle_addr_err_st = kvm_trap_emul_handle_addr_err_st,
1257 .handle_addr_err_ld = kvm_trap_emul_handle_addr_err_ld,
1258 .handle_syscall = kvm_trap_emul_handle_syscall,
1259 .handle_res_inst = kvm_trap_emul_handle_res_inst,
1260 .handle_break = kvm_trap_emul_handle_break,
James Hogan0a560422015-02-06 16:03:57 +00001261 .handle_trap = kvm_trap_emul_handle_trap,
James Hoganc2537ed2015-02-06 10:56:27 +00001262 .handle_msa_fpe = kvm_trap_emul_handle_msa_fpe,
James Hogan1c0cd662015-02-06 10:56:27 +00001263 .handle_fpe = kvm_trap_emul_handle_fpe,
James Hogan98119ad2015-02-06 11:11:56 +00001264 .handle_msa_disabled = kvm_trap_emul_handle_msa_disabled,
Sanjay Lalf5c236d2012-11-21 18:34:09 -08001265
James Hoganedab4fe2017-03-14 10:15:23 +00001266 .hardware_enable = kvm_trap_emul_hardware_enable,
1267 .hardware_disable = kvm_trap_emul_hardware_disable,
James Hogan607ef2f2017-03-14 10:15:22 +00001268 .check_extension = kvm_trap_emul_check_extension,
Sanjay Lalf5c236d2012-11-21 18:34:09 -08001269 .vcpu_init = kvm_trap_emul_vcpu_init,
James Hogan630766b2016-09-08 23:00:24 +01001270 .vcpu_uninit = kvm_trap_emul_vcpu_uninit,
Sanjay Lalf5c236d2012-11-21 18:34:09 -08001271 .vcpu_setup = kvm_trap_emul_vcpu_setup,
James Hoganb6209112016-10-25 00:01:37 +01001272 .flush_shadow_all = kvm_trap_emul_flush_shadow_all,
1273 .flush_shadow_memslot = kvm_trap_emul_flush_shadow_memslot,
Sanjay Lalf5c236d2012-11-21 18:34:09 -08001274 .gva_to_gpa = kvm_trap_emul_gva_to_gpa_cb,
1275 .queue_timer_int = kvm_mips_queue_timer_int_cb,
1276 .dequeue_timer_int = kvm_mips_dequeue_timer_int_cb,
1277 .queue_io_int = kvm_mips_queue_io_int_cb,
1278 .dequeue_io_int = kvm_mips_dequeue_io_int_cb,
1279 .irq_deliver = kvm_mips_irq_deliver_cb,
1280 .irq_clear = kvm_mips_irq_clear_cb,
James Hoganf5c43bd2016-06-15 19:29:49 +01001281 .num_regs = kvm_trap_emul_num_regs,
1282 .copy_reg_indices = kvm_trap_emul_copy_reg_indices,
James Hoganf8be02d2014-05-29 10:16:29 +01001283 .get_one_reg = kvm_trap_emul_get_one_reg,
1284 .set_one_reg = kvm_trap_emul_set_one_reg,
James Hogana60b8432016-11-12 00:00:13 +00001285 .vcpu_load = kvm_trap_emul_vcpu_load,
1286 .vcpu_put = kvm_trap_emul_vcpu_put,
James Hogana2c046e2016-11-18 13:14:37 +00001287 .vcpu_run = kvm_trap_emul_vcpu_run,
1288 .vcpu_reenter = kvm_trap_emul_vcpu_reenter,
Sanjay Lalf5c236d2012-11-21 18:34:09 -08001289};
1290
1291int kvm_mips_emulation_init(struct kvm_mips_callbacks **install_callbacks)
1292{
1293 *install_callbacks = &kvm_trap_emul_callbacks;
1294 return 0;
1295}