blob: 9d371ee0a755c328ea50a3b6f8a99827020a148d [file] [log] [blame]
Sanjay Lal858dd5d2012-11-21 18:34:05 -08001/*
2* This file is subject to the terms and conditions of the GNU General Public
3* License. See the file "COPYING" in the main directory of this archive
4* for more details.
5*
6* KVM/MIPS TLB handling, this file is part of the Linux host kernel so that
7* TLB handlers run from KSEG0
8*
9* Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
10* Authors: Sanjay Lal <sanjayl@kymasys.com>
11*/
12
Sanjay Lal858dd5d2012-11-21 18:34:05 -080013#include <linux/sched.h>
14#include <linux/smp.h>
15#include <linux/mm.h>
16#include <linux/delay.h>
17#include <linux/module.h>
18#include <linux/kvm_host.h>
Sanjay Lal6d17c0d2013-05-18 06:54:24 -070019#include <linux/srcu.h>
20
Sanjay Lal858dd5d2012-11-21 18:34:05 -080021
22#include <asm/cpu.h>
23#include <asm/bootinfo.h>
24#include <asm/mmu_context.h>
25#include <asm/pgtable.h>
26#include <asm/cacheflush.h>
James Hogane36059e2014-01-17 12:01:30 +000027#include <asm/tlb.h>
Sanjay Lal858dd5d2012-11-21 18:34:05 -080028
29#undef CONFIG_MIPS_MT
30#include <asm/r4kcache.h>
31#define CONFIG_MIPS_MT
32
33#define KVM_GUEST_PC_TLB 0
34#define KVM_GUEST_SP_TLB 1
35
36#define PRIx64 "llx"
37
Sanjay Lal858dd5d2012-11-21 18:34:05 -080038atomic_t kvm_mips_instance;
39EXPORT_SYMBOL(kvm_mips_instance);
40
41/* These function pointers are initialized once the KVM module is loaded */
42pfn_t(*kvm_mips_gfn_to_pfn) (struct kvm *kvm, gfn_t gfn);
43EXPORT_SYMBOL(kvm_mips_gfn_to_pfn);
44
45void (*kvm_mips_release_pfn_clean) (pfn_t pfn);
46EXPORT_SYMBOL(kvm_mips_release_pfn_clean);
47
48bool(*kvm_mips_is_error_pfn) (pfn_t pfn);
49EXPORT_SYMBOL(kvm_mips_is_error_pfn);
50
/*
 * Return the host ASID currently assigned to the guest's kernel-mode
 * address space on this CPU (low ASID bits only; the generation bits
 * are masked off with ASID_MASK).
 */
uint32_t kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_kernel_asid[smp_processor_id()] & ASID_MASK;
}
55
56
/*
 * Return the host ASID currently assigned to the guest's user-mode
 * address space on this CPU (low ASID bits only).
 */
uint32_t kvm_mips_get_user_asid(struct kvm_vcpu *vcpu)
{
	return vcpu->arch.guest_user_asid[smp_processor_id()] & ASID_MASK;
}
61
/*
 * Despite the name, this returns the host TLB *index* reserved for the
 * guest commpage (kvm->arch.commpage_tlb), not an ASID. It is used as
 * the CP0 Index value for the indexed TLB write in
 * kvm_mips_handle_commpage_tlb_fault().
 */
inline uint32_t kvm_mips_get_commpage_asid (struct kvm_vcpu *vcpu)
{
	return vcpu->kvm->arch.commpage_tlb;
}
66
67
/*
 * Dump helpers for a TLB entry data set.
 */
71
/*
 * Dump every entry of the host TLB to the kernel log, for debugging.
 * tlb_read() clobbers EntryHi/PageMask, so both are saved up front and
 * restored at the end; runs with interrupts disabled throughout.
 */
void kvm_mips_dump_host_tlbs(void)
{
	unsigned long old_entryhi;
	unsigned long old_pagemask;
	struct kvm_mips_tlb tlb;
	unsigned long flags;
	int i;

	local_irq_save(flags);

	/* Preserve CP0 state clobbered by the tlb_read() sweep below */
	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	printk("HOST TLBs:\n");
	printk("ASID: %#lx\n", read_c0_entryhi() & ASID_MASK);

	for (i = 0; i < current_cpu_data.tlbsize; i++) {
		write_c0_index(i);
		mtc0_tlbw_hazard();

		tlb_read();
		tlbw_use_hazard();

		tlb.tlb_hi = read_c0_entryhi();
		tlb.tlb_lo0 = read_c0_entrylo0();
		tlb.tlb_lo1 = read_c0_entrylo1();
		tlb.tlb_mask = read_c0_pagemask();

		/* '*' flags an entry where neither half is valid */
		printk("TLB%c%3d Hi 0x%08lx ",
		       (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
		       i, tlb.tlb_hi);
		printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
		       (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo0 >> 3) & 7);
		printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
		       (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
		       (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
		       (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
		       (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
	}
	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	local_irq_restore(flags);
}
119
120void kvm_mips_dump_guest_tlbs(struct kvm_vcpu *vcpu)
121{
122 struct mips_coproc *cop0 = vcpu->arch.cop0;
123 struct kvm_mips_tlb tlb;
124 int i;
125
126 printk("Guest TLBs:\n");
127 printk("Guest EntryHi: %#lx\n", kvm_read_c0_guest_entryhi(cop0));
128
129 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
130 tlb = vcpu->arch.guest_tlb[i];
131 printk("TLB%c%3d Hi 0x%08lx ",
132 (tlb.tlb_lo0 | tlb.tlb_lo1) & MIPS3_PG_V ? ' ' : '*',
133 i, tlb.tlb_hi);
134 printk("Lo0=0x%09" PRIx64 " %c%c attr %lx ",
135 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo0),
136 (tlb.tlb_lo0 & MIPS3_PG_D) ? 'D' : ' ',
137 (tlb.tlb_lo0 & MIPS3_PG_G) ? 'G' : ' ',
138 (tlb.tlb_lo0 >> 3) & 7);
139 printk("Lo1=0x%09" PRIx64 " %c%c attr %lx sz=%lx\n",
140 (uint64_t) mips3_tlbpfn_to_paddr(tlb.tlb_lo1),
141 (tlb.tlb_lo1 & MIPS3_PG_D) ? 'D' : ' ',
142 (tlb.tlb_lo1 & MIPS3_PG_G) ? 'G' : ' ',
143 (tlb.tlb_lo1 >> 3) & 7, tlb.tlb_mask);
144 }
145}
146
Sanjay Lal6d17c0d2013-05-18 06:54:24 -0700147static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800148{
Sanjay Lal6d17c0d2013-05-18 06:54:24 -0700149 int srcu_idx, err = 0;
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800150 pfn_t pfn;
151
152 if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
Sanjay Lal6d17c0d2013-05-18 06:54:24 -0700153 return 0;
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800154
Sanjay Lal6d17c0d2013-05-18 06:54:24 -0700155 srcu_idx = srcu_read_lock(&kvm->srcu);
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800156 pfn = kvm_mips_gfn_to_pfn(kvm, gfn);
157
158 if (kvm_mips_is_error_pfn(pfn)) {
Sanjay Lal6d17c0d2013-05-18 06:54:24 -0700159 kvm_err("Couldn't get pfn for gfn %#" PRIx64 "!\n", gfn);
160 err = -EFAULT;
161 goto out;
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800162 }
163
164 kvm->arch.guest_pmap[gfn] = pfn;
Sanjay Lal6d17c0d2013-05-18 06:54:24 -0700165out:
166 srcu_read_unlock(&kvm->srcu, srcu_idx);
167 return err;
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800168}
169
/* Translate guest KSEG0 addresses to Host PA */
/*
 * Convert a guest KSEG0 virtual address to its backing host physical
 * address, mapping the page on demand via kvm_mips_map_page().
 */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
	unsigned long gva)
{
	gfn_t gfn;
	uint32_t offset = gva & ~PAGE_MASK;	/* page offset carries over */
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	/*
	 * NOTE(review): map failure returns KVM_INVALID_ADDR while the
	 * checks above return KVM_INVALID_PAGE — confirm callers treat
	 * both sentinels as failure.
	 */
	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
197
198/* XXXKYMA: Must be called with interrupts disabled */
199/* set flush_dcache_mask == 0 if no dcache flush required */
200int
201kvm_mips_host_tlb_write(struct kvm_vcpu *vcpu, unsigned long entryhi,
202 unsigned long entrylo0, unsigned long entrylo1, int flush_dcache_mask)
203{
204 unsigned long flags;
205 unsigned long old_entryhi;
206 volatile int idx;
207
208 local_irq_save(flags);
209
210
211 old_entryhi = read_c0_entryhi();
212 write_c0_entryhi(entryhi);
213 mtc0_tlbw_hazard();
214
215 tlb_probe();
216 tlb_probe_hazard();
217 idx = read_c0_index();
218
219 if (idx > current_cpu_data.tlbsize) {
220 kvm_err("%s: Invalid Index: %d\n", __func__, idx);
221 kvm_mips_dump_host_tlbs();
222 return -1;
223 }
224
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800225 write_c0_entrylo0(entrylo0);
226 write_c0_entrylo1(entrylo1);
227 mtc0_tlbw_hazard();
228
James Hoganb5dfc6c2014-05-29 10:16:26 +0100229 if (idx < 0)
230 tlb_write_random();
231 else
232 tlb_write_indexed();
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800233 tlbw_use_hazard();
234
235#ifdef DEBUG
236 if (debug) {
237 kvm_debug("@ %#lx idx: %2d [entryhi(R): %#lx] "
238 "entrylo0(R): 0x%08lx, entrylo1(R): 0x%08lx\n",
239 vcpu->arch.pc, idx, read_c0_entryhi(),
240 read_c0_entrylo0(), read_c0_entrylo1());
241 }
242#endif
243
244 /* Flush D-cache */
245 if (flush_dcache_mask) {
246 if (entrylo0 & MIPS3_PG_V) {
247 ++vcpu->stat.flush_dcache_exits;
248 flush_data_cache_page((entryhi & VPN2_MASK) & ~flush_dcache_mask);
249 }
250 if (entrylo1 & MIPS3_PG_V) {
251 ++vcpu->stat.flush_dcache_exits;
252 flush_data_cache_page(((entryhi & VPN2_MASK) & ~flush_dcache_mask) |
253 (0x1 << PAGE_SHIFT));
254 }
255 }
256
257 /* Restore old ASID */
258 write_c0_entryhi(old_entryhi);
259 mtc0_tlbw_hazard();
260 tlbw_use_hazard();
261 local_irq_restore(flags);
262 return 0;
263}
264
265
/* XXXKYMA: Must be called with interrupts disabled */
/*
 * Handle a host TLB miss on a guest KSEG0 address: map the backing
 * host pages for the even/odd frame pair containing @badvaddr and
 * install them as one host TLB entry.
 * Returns 0 on success, -1 on a bad address/gfn or map failure.
 */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	int even;
	struct kvm *kvm = vcpu->kvm;
	/* KSEG0 insertions need no D-cache flush */
	const int flush_dcache_mask = 0;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	/* One TLB entry covers an even/odd page pair; find our half */
	even = !(gfn & 0x1);
	vaddr = badvaddr & (PAGE_MASK << 1);

	/* Make sure both halves of the pair have backing pages */
	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	if (even) {
		pfn0 = kvm->arch.guest_pmap[gfn];
		pfn1 = kvm->arch.guest_pmap[gfn ^ 0x1];
	} else {
		pfn0 = kvm->arch.guest_pmap[gfn ^ 0x1];
		pfn1 = kvm->arch.guest_pmap[gfn];
	}

	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	/* Cache attr 3 (0x3 << 3), dirty (1 << 2), valid (0x1 << 1) */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
		(0x1 << 1);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
		(0x1 << 1);

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       flush_dcache_mask);
}
318
/*
 * Map the vcpu's kseg0 commpage into the host TLB at the reserved slot
 * (kvm_mips_get_commpage_asid() returns a TLB index, not an ASID),
 * using an indexed write rather than a probe. Only EntryLo0 is made
 * valid; EntryLo1 is left zero.
 * Disables IRQs internally; always returns 0.
 */
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
	struct kvm_vcpu *vcpu)
{
	pfn_t pfn0, pfn1;
	unsigned long flags, old_entryhi = 0, vaddr = 0;
	unsigned long entrylo0 = 0, entrylo1 = 0;

	pfn0 = CPHYSADDR(vcpu->arch.kseg0_commpage) >> PAGE_SHIFT;
	pfn1 = 0;
	/* Cache attr 3, dirty, valid */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) | (1 << 2) |
		(0x1 << 1);
	entrylo1 = 0;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	vaddr = badvaddr & (PAGE_MASK << 1);
	write_c0_entryhi(vaddr | kvm_mips_get_kernel_asid(vcpu));
	mtc0_tlbw_hazard();
	write_c0_entrylo0(entrylo0);
	mtc0_tlbw_hazard();
	write_c0_entrylo1(entrylo1);
	mtc0_tlbw_hazard();
	/* Indexed write into the reserved commpage slot */
	write_c0_index(kvm_mips_get_commpage_asid(vcpu));
	mtc0_tlbw_hazard();
	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

#ifdef DEBUG
	kvm_debug ("@ %#lx idx: %2d [entryhi(R): %#lx] entrylo0 (R): 0x%08lx, entrylo1(R): 0x%08lx\n",
		   vcpu->arch.pc, read_c0_index(), read_c0_entryhi(),
		   read_c0_entrylo0(), read_c0_entrylo1());
#endif

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();
	local_irq_restore(flags);

	return 0;
}
363
/*
 * Mirror a guest TLB entry into the host TLB. The D and V attribute
 * bits come from the guest entry; the page frames come from the guest
 * physical map (mapped on demand). Optionally reports the host
 * physical address of each half via @hpa0/@hpa1.
 * Returns 0 on success, -1 if a backing page could not be mapped.
 */
int
kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
	struct kvm_mips_tlb *tlb, unsigned long *hpa0, unsigned long *hpa1)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	pfn_t pfn0, pfn1;

	/* A zero VPN2 is treated as an unused entry: map frame 0 */
	if ((tlb->tlb_hi & VPN2_MASK) == 0) {
		pfn0 = 0;
		pfn1 = 0;
	} else {
		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT) < 0)
			return -1;

		if (kvm_mips_map_page(kvm, mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT) < 0)
			return -1;

		pfn0 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo0) >> PAGE_SHIFT];
		pfn1 = kvm->arch.guest_pmap[mips3_tlbpfn_to_paddr(tlb->tlb_lo1) >> PAGE_SHIFT];
	}

	if (hpa0)
		*hpa0 = pfn0 << PAGE_SHIFT;

	if (hpa1)
		*hpa1 = pfn1 << PAGE_SHIFT;

	/* Get attributes from the Guest TLB */
	/* ASID chosen by current guest mode (kernel vs user) */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
		kvm_mips_get_kernel_asid(vcpu) : kvm_mips_get_user_asid(vcpu));
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) | (0x3 << 3) |
		(tlb->tlb_lo0 & MIPS3_PG_D) | (tlb->tlb_lo0 & MIPS3_PG_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) | (0x3 << 3) |
		(tlb->tlb_lo1 & MIPS3_PG_D) | (tlb->tlb_lo1 & MIPS3_PG_V);

#ifdef DEBUG
	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo0, tlb->tlb_lo1);
#endif

	return kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				       tlb->tlb_mask);
}
409
410int kvm_mips_guest_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long entryhi)
411{
412 int i;
413 int index = -1;
414 struct kvm_mips_tlb *tlb = vcpu->arch.guest_tlb;
415
416
417 for (i = 0; i < KVM_MIPS_GUEST_TLB_SIZE; i++) {
418 if (((TLB_VPN2(tlb[i]) & ~tlb[i].tlb_mask) == ((entryhi & VPN2_MASK) & ~tlb[i].tlb_mask)) &&
David Daney48c4ac92013-05-13 13:56:44 -0700419 (TLB_IS_GLOBAL(tlb[i]) || (TLB_ASID(tlb[i]) == (entryhi & ASID_MASK)))) {
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800420 index = i;
421 break;
422 }
423 }
424
425#ifdef DEBUG
426 kvm_debug("%s: entryhi: %#lx, index: %d lo0: %#lx, lo1: %#lx\n",
427 __func__, entryhi, index, tlb[i].tlb_lo0, tlb[i].tlb_lo1);
428#endif
429
430 return index;
431}
432
/*
 * Probe the host TLB for @vaddr under the vcpu's current host ASID
 * (kernel or user, depending on guest mode). Returns the matching TLB
 * index, or a negative value on no match. EntryHi is saved/restored
 * around the probe; disables IRQs internally.
 */
int kvm_mips_host_tlb_lookup(struct kvm_vcpu *vcpu, unsigned long vaddr)
{
	unsigned long old_entryhi, flags;
	volatile int idx;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	/* Probe key = VPN2 of vaddr plus the mode-appropriate ASID */
	if (KVM_GUEST_KERNEL_MODE(vcpu))
		write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_kernel_asid(vcpu));
	else {
		write_c0_entryhi((vaddr & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
	}

	mtc0_tlbw_hazard();

	tlb_probe();
	tlb_probe_hazard();
	idx = read_c0_index();

	/* Restore old ASID */
	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

#ifdef DEBUG
	kvm_debug("Host TLB lookup, %#lx, idx: %2d\n", vaddr, idx);
#endif

	return idx;
}
468
469int kvm_mips_host_tlb_inv(struct kvm_vcpu *vcpu, unsigned long va)
470{
471 int idx;
472 unsigned long flags, old_entryhi;
473
474 local_irq_save(flags);
475
476
477 old_entryhi = read_c0_entryhi();
478
479 write_c0_entryhi((va & VPN2_MASK) | kvm_mips_get_user_asid(vcpu));
480 mtc0_tlbw_hazard();
481
482 tlb_probe();
483 tlb_probe_hazard();
484 idx = read_c0_index();
485
486 if (idx >= current_cpu_data.tlbsize)
487 BUG();
488
489 if (idx > 0) {
490 write_c0_entryhi(UNIQUE_ENTRYHI(idx));
491 mtc0_tlbw_hazard();
492
493 write_c0_entrylo0(0);
494 mtc0_tlbw_hazard();
495
496 write_c0_entrylo1(0);
497 mtc0_tlbw_hazard();
498
499 tlb_write_indexed();
500 mtc0_tlbw_hazard();
501 }
502
503 write_c0_entryhi(old_entryhi);
504 mtc0_tlbw_hazard();
505 tlbw_use_hazard();
506
507 local_irq_restore(flags);
508
509#ifdef DEBUG
510 if (idx > 0) {
511 kvm_debug("%s: Invalidated entryhi %#lx @ idx %d\n", __func__,
512 (va & VPN2_MASK) | (vcpu->arch.asid_map[va & ASID_MASK] & ASID_MASK), idx);
513 }
514#endif
515
516 return 0;
517}
518
/* XXXKYMA: Fix Guest USER/KERNEL no longer share the same ASID*/
/*
 * Invalidate the host TLB entry at @index by overwriting it with a
 * unique never-matching EntryHi and zeroed EntryLo pair.
 * Disables IRQs internally; always returns 0.
 */
int kvm_mips_host_tlb_inv_index(struct kvm_vcpu *vcpu, int index)
{
	unsigned long flags, old_entryhi;

	if (index >= current_cpu_data.tlbsize)
		BUG();

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();

	/* UNIQUE_ENTRYHI yields a VPN2 that can never be matched */
	write_c0_entryhi(UNIQUE_ENTRYHI(index));
	mtc0_tlbw_hazard();

	write_c0_index(index);
	mtc0_tlbw_hazard();

	write_c0_entrylo0(0);
	mtc0_tlbw_hazard();

	write_c0_entrylo1(0);
	mtc0_tlbw_hazard();

	tlb_write_indexed();
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);

	return 0;
}
556
/*
 * Invalidate the entire host TLB. When @skip_kseg0 is set, entries
 * whose EntryHi falls in guest KSEG0 are preserved so guest kernel
 * mappings survive the flush. Disables IRQs internally.
 */
void kvm_mips_flush_host_tlb(int skip_kseg0)
{
	unsigned long flags;
	unsigned long old_entryhi, entryhi;
	unsigned long old_pagemask;
	int entry = 0;
	int maxentry = current_cpu_data.tlbsize;

	local_irq_save(flags);

	old_entryhi = read_c0_entryhi();
	old_pagemask = read_c0_pagemask();

	/* Blast 'em all away. */
	for (entry = 0; entry < maxentry; entry++) {

		write_c0_index(entry);
		mtc0_tlbw_hazard();

		if (skip_kseg0) {
			/* Read the entry back to inspect its EntryHi */
			tlb_read();
			tlbw_use_hazard();

			entryhi = read_c0_entryhi();

			/* Don't blow away guest kernel entries */
			if (KVM_GUEST_KSEGX(entryhi) == KVM_GUEST_KSEG0) {
				continue;
			}
		}

		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		mtc0_tlbw_hazard();
		write_c0_entrylo0(0);
		mtc0_tlbw_hazard();
		write_c0_entrylo1(0);
		mtc0_tlbw_hazard();

		tlb_write_indexed();
		mtc0_tlbw_hazard();
	}

	tlbw_use_hazard();

	write_c0_entryhi(old_entryhi);
	write_c0_pagemask(old_pagemask);
	mtc0_tlbw_hazard();
	tlbw_use_hazard();

	local_irq_restore(flags);
}
610
/*
 * Allocate the next ASID from this CPU's asid_cache for @mm. When the
 * low ASID bits wrap to zero, a new generation begins: the (vtag)
 * icache and local TLB are flushed so stale translations cannot match,
 * and the version field is bumped past zero if necessary.
 */
void
kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
	struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	/* Wrapped around the ASID space? Start a new generation. */
	if (!((asid += ASID_INC) & ASID_MASK)) {
		if (cpu_has_vtag_icache) {
			flush_icache_all();
		}

		kvm_local_flush_tlb_all();	/* start new asid cycle */

		if (!asid)	/* fix version if needed */
			asid = ASID_FIRST_VERSION;
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
630
/*
 * Unconditionally invalidate every entry of this CPU's TLB by writing
 * each index with a unique never-matching EntryHi and zeroed EntryLo
 * pair. Used when a new ASID generation starts. Disables IRQs
 * internally.
 */
void kvm_local_flush_tlb_all(void)
{
	unsigned long flags;
	unsigned long old_ctx;
	int entry = 0;

	local_irq_save(flags);
	/* Save old context and create impossible VPN2 value */
	old_ctx = read_c0_entryhi();
	write_c0_entrylo0(0);
	write_c0_entrylo1(0);

	/* Blast 'em all away. */
	while (entry < current_cpu_data.tlbsize) {
		/* Make sure all entries differ. */
		write_c0_entryhi(UNIQUE_ENTRYHI(entry));
		write_c0_index(entry);
		mtc0_tlbw_hazard();
		tlb_write_indexed();
		entry++;
	}
	tlbw_use_hazard();
	write_c0_entryhi(old_ctx);
	mtc0_tlbw_hazard();

	local_irq_restore(flags);
}
658
/* Restore ASID once we are scheduled back after preemption */
/*
 * KVM hook: this vcpu is being scheduled onto @cpu. If the cached
 * guest kernel ASID is from a stale generation on this CPU, allocate
 * fresh kernel and user ASIDs. Then, if we were preempted while in
 * guest context (PF_VCPU), reload EntryHi: either the preserved
 * preempt_entryhi (no reallocation) or the new ASID for the guest's
 * current mode.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;
	int newasid = 0;

#ifdef DEBUG
	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);
#endif

	/* Alocate new kernel and user ASIDs if needed */

	local_irq_save(flags);

	/* Generation mismatch => this vcpu's ASIDs are stale on this CPU */
	if (((vcpu->arch.
	      guest_kernel_asid[cpu] ^ asid_cache(cpu)) & ASID_VERSION_MASK)) {
		kvm_get_new_mmu_context(&vcpu->arch.guest_kernel_mm, cpu, vcpu);
		vcpu->arch.guest_kernel_asid[cpu] =
		    vcpu->arch.guest_kernel_mm.context.asid[cpu];
		kvm_get_new_mmu_context(&vcpu->arch.guest_user_mm, cpu, vcpu);
		vcpu->arch.guest_user_asid[cpu] =
		    vcpu->arch.guest_user_mm.context.asid[cpu];
		newasid++;

		kvm_info("[%d]: cpu_context: %#lx\n", cpu,
			 cpu_context(cpu, current->mm));
		kvm_info("[%d]: Allocated new ASID for Guest Kernel: %#x\n",
			 cpu, vcpu->arch.guest_kernel_asid[cpu]);
		kvm_info("[%d]: Allocated new ASID for Guest User: %#x\n", cpu,
			 vcpu->arch.guest_user_asid[cpu]);
	}

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_info("[%d->%d]KVM VCPU[%d] switch\n",
			 vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
	}

	if (!newasid) {
		/* If we preempted while the guest was executing, then reload the pre-empted ASID */
		if (current->flags & PF_VCPU) {
			write_c0_entryhi(vcpu->arch.
					 preempt_entryhi & ASID_MASK);
			ehb();
		}
	} else {
		/* New ASIDs were allocated for the VM */

		/* Were we in guest context? If so then the pre-empted ASID is no longer
		 * valid, we need to set it to what it should be based on the mode of
		 * the Guest (Kernel/User)
		 */
		if (current->flags & PF_VCPU) {
			if (KVM_GUEST_KERNEL_MODE(vcpu))
				write_c0_entryhi(vcpu->arch.
						 guest_kernel_asid[cpu] &
						 ASID_MASK);
			else
				write_c0_entryhi(vcpu->arch.
						 guest_user_asid[cpu] &
						 ASID_MASK);
			ehb();
		}
	}

	local_irq_restore(flags);

}
726
/* ASID can change if another task is scheduled during preemption */
/*
 * KVM hook: this vcpu is being scheduled out. Remember the live
 * EntryHi (restored later by kvm_arch_vcpu_load()) and the CPU we last
 * ran on, then switch EntryHi back to the host task's ASID — dropping
 * the host mm context first if its ASID generation is stale here.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	uint32_t cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();

	/* Preserve guest EntryHi for the matching vcpu_load() */
	vcpu->arch.preempt_entryhi = read_c0_entryhi();
	vcpu->arch.last_sched_cpu = cpu;

	if (((cpu_context(cpu, current->mm) ^ asid_cache(cpu)) &
	     ASID_VERSION_MASK)) {
		kvm_debug("%s: Dropping MMU Context: %#lx\n", __func__,
			  cpu_context(cpu, current->mm));
		drop_mmu_context(current->mm, cpu);
	}
	write_c0_entryhi(cpu_asid(cpu, current->mm));
	ehb();

	local_irq_restore(flags);
}
752
/*
 * Fetch the guest instruction word at @opc.
 * For mapped segments (below KSEG0, or KSEG2/3) the host TLB is tried
 * first; on a miss the guest TLB is searched under the guest's EntryHi
 * ASID and the mapping is installed via
 * kvm_mips_handle_mapped_seg_tlb_fault() before dereferencing.
 * Guest KSEG0 addresses are translated to a host PA and read through
 * CKSEG0. Returns the instruction, or KVM_INVALID_INST on failure.
 */
uint32_t kvm_get_inst(uint32_t *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags;
	uint32_t inst;
	int index;

	if (KVM_GUEST_KSEGX((unsigned long) opc) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX((unsigned long) opc) == KVM_GUEST_KSEG23) {
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, (unsigned long) opc);
		if (index >= 0) {
			/* Mapping already present in host TLB: read directly */
			inst = *(opc);
		} else {
			/* Fall back to the guest TLB under the guest ASID */
			index =
			    kvm_mips_guest_tlb_lookup(vcpu,
						      ((unsigned long) opc & VPN2_MASK)
						      |
						      (kvm_read_c0_guest_entryhi
						       (cop0) & ASID_MASK));
			if (index < 0) {
				kvm_err
				    ("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
				     __func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			/* Install the guest mapping, then read through it */
			kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
							     &vcpu->arch.
							     guest_tlb[index],
							     NULL, NULL);
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(opc) == KVM_GUEST_KSEG0) {
		paddr =
		    kvm_mips_translate_guest_kseg0_to_hpa(vcpu,
							  (unsigned long) opc);
		inst = *(uint32_t *) CKSEG0ADDR(paddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}
800
801EXPORT_SYMBOL(kvm_local_flush_tlb_all);
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800802EXPORT_SYMBOL(kvm_mips_handle_mapped_seg_tlb_fault);
803EXPORT_SYMBOL(kvm_mips_handle_commpage_tlb_fault);
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800804EXPORT_SYMBOL(kvm_mips_dump_host_tlbs);
805EXPORT_SYMBOL(kvm_mips_handle_kseg0_tlb_fault);
806EXPORT_SYMBOL(kvm_mips_host_tlb_lookup);
807EXPORT_SYMBOL(kvm_mips_flush_host_tlb);
808EXPORT_SYMBOL(kvm_mips_guest_tlb_lookup);
809EXPORT_SYMBOL(kvm_mips_host_tlb_inv);
810EXPORT_SYMBOL(kvm_mips_translate_guest_kseg0_to_hpa);
Sanjay Lal858dd5d2012-11-21 18:34:05 -0800811EXPORT_SYMBOL(kvm_mips_dump_guest_tlbs);
812EXPORT_SYMBOL(kvm_get_inst);
813EXPORT_SYMBOL(kvm_arch_vcpu_load);
814EXPORT_SYMBOL(kvm_arch_vcpu_put);