/*
 * This file is subject to the terms and conditions of the GNU General Public
 * License. See the file "COPYING" in the main directory of this archive
 * for more details.
 *
 * KVM/MIPS MMU handling in the KVM module.
 *
 * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved.
 * Authors: Sanjay Lal <sanjayl@kymasys.com>
 */

#include <linux/highmem.h>
#include <linux/kvm_host.h>
#include <linux/uaccess.h>
#include <asm/mmu_context.h>
#include <asm/pgalloc.h>

/*
 * KVM_MMU_CACHE_MIN_PAGES is the number of GPA page table translation levels
 * for which pages need to be cached.
 */
#if defined(__PAGETABLE_PMD_FOLDED)
#define KVM_MMU_CACHE_MIN_PAGES 1
#else
#define KVM_MMU_CACHE_MIN_PAGES 2
#endif

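/*
 * mmu_topup_memory_cache() - Pre-fill a memory cache with free pages.
 *
 * Ensure that @cache holds at least @min pre-allocated pages (topping it up
 * towards @max), so that later page table allocations taken from the cache
 * cannot fail. Returns 0 on success or -ENOMEM if a page allocation fails.
 */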
static int mmu_topup_memory_cache(struct kvm_mmu_memory_cache *cache,
				  int min, int max)
{
	void *page;

	BUG_ON(max > KVM_NR_MEM_OBJS);
	if (cache->nobjs >= min)
		return 0;
	while (cache->nobjs < max) {
		page = (void *)__get_free_page(GFP_KERNEL);
		if (!page)
			return -ENOMEM;
		cache->objects[cache->nobjs++] = page;
	}
	return 0;
}

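/* Free all pages still held in a memory cache. */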
static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc)
{
	while (mc->nobjs)
		free_page((unsigned long)mc->objects[--mc->nobjs]);
}

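/* Take one pre-allocated page from a memory cache topped up earlier. */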
static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc)
{
	void *p;

	BUG_ON(!mc || !mc->nobjs);
	p = mc->objects[--mc->nobjs];
	return p;
}

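/* Release any pages left in the VCPU's MMU page table cache. */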
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}

/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 * @addr:	Address to index page table using.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 *
 * Returns:	Pointer to pte_t corresponding to @addr.
 *		NULL if a page table doesn't exist for @addr and !@cache.
 *		NULL if a page table allocation failed.
 */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
				unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pgd += pgd_index(addr);
	if (pgd_none(*pgd)) {
		/* Not used on MIPS yet */
		BUG();
		return NULL;
	}
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new_pmd;

		if (!cache)
			return NULL;
		new_pmd = mmu_memory_cache_alloc(cache);
		pmd_init((unsigned long)new_pmd,
			 (unsigned long)invalid_pte_table);
		pud_populate(NULL, pud, new_pmd);
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new_pte;

		if (!cache)
			return NULL;
		new_pte = mmu_memory_cache_alloc(cache);
		clear_page(new_pte);
		pmd_populate_kernel(NULL, pmd, new_pte);
	}
	return pte_offset(pmd, addr);
}

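/**
 * kvm_mips_map_page() - Map a guest physical page.
 * @kvm:	KVM pointer.
 * @gfn:	Guest frame number.
 *
 * Look up the host PFN backing guest frame @gfn (faulting it in if necessary)
 * and record it in the guest physical map, so that later faults on this page
 * can be resolved to a host page.
 *
 * Returns:	0 on success.
 *		-EFAULT if the PFN could not be obtained.
 */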
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}

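/**
 * kvm_trap_emul_pte_for_gva() - Find or create a GVA page table entry.
 * @vcpu:	VCPU pointer.
 * @addr:	Guest virtual address to look up.
 *
 * Walk the GVA page tables of the current guest mode (kernel or user) to find
 * the PTE for @addr, allocating any missing intermediate page tables from the
 * VCPU's MMU page cache, which is topped up first.
 *
 * Returns:	Pointer to the PTE on success, or NULL on allocation failure.
 */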
static pte_t *kvm_trap_emul_pte_for_gva(struct kvm_vcpu *vcpu,
					unsigned long addr)
{
	struct kvm_mmu_memory_cache *memcache = &vcpu->arch.mmu_page_cache;
	pgd_t *pgdp;
	int ret;

	/* We need a minimum of cached pages ready for page table creation */
	ret = mmu_topup_memory_cache(memcache, KVM_MMU_CACHE_MIN_PAGES,
				     KVM_NR_MEM_OBJS);
	if (ret)
		return NULL;

	if (KVM_GUEST_KERNEL_MODE(vcpu))
		pgdp = vcpu->arch.guest_kernel_mm.pgd;
	else
		pgdp = vcpu->arch.guest_user_mm.pgd;

	return kvm_mips_walk_pgd(pgdp, memcache, addr);
}

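/*
 * Invalidate the GVA page table entries mapping the even/odd page pair that
 * contains @addr, in the guest kernel page tables and, if @user is true, in
 * the guest user page tables as well.
 */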
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user)
{
	pgd_t *pgdp;
	pte_t *ptep;

	addr &= PAGE_MASK << 1;

	pgdp = vcpu->arch.guest_kernel_mm.pgd;
	ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
	if (ptep) {
		ptep[0] = pfn_pte(0, __pgprot(0));
		ptep[1] = pfn_pte(0, __pgprot(0));
	}

	if (user) {
		pgdp = vcpu->arch.guest_user_mm.pgd;
		ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
		if (ptep) {
			ptep[0] = pfn_pte(0, __pgprot(0));
			ptep[1] = pfn_pte(0, __pgprot(0));
		}
	}
}

/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest virtual address space from the VM's GVA page tables.
 */

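/*
 * Clear the PTEs covering the given GVA range from a last level page table.
 * Returns true if the whole table is covered, in which case nothing is
 * cleared and the caller may free the table instead.
 */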
static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
				   unsigned long end_gva)
{
	int i_min = __pte_offset(start_gva);
	int i_max = __pte_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	/*
	 * There's no freeing to do, so there's no point clearing individual
	 * entries unless only part of the last level page table needs flushing.
	 */
	if (safe_to_remove)
		return true;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return false;
}

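/*
 * Flush the given GVA range from a PMD level table, freeing any last level
 * tables that are fully covered. Returns true if the caller may free the
 * whole PMD table.
 */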
static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = __pmd_offset(start_gva);
	int i_max = __pmd_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset(pmd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

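/* As above, but at the PUD level, freeing fully covered PMD tables. */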
static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
				   unsigned long end_gva)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = __pud_offset(start_gva);
	int i_max = __pud_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

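/* As above, but at the PGD level, freeing fully covered PUD tables. */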
static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gva);
	int i_max = pgd_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		pud = pud_offset(pgd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}

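/*
 * kvm_mips_flush_gva_pt() - Flush a guest virtual address (GVA) page table.
 * @pgd:	GVA page directory to flush.
 * @flags:	KMF_* flags selecting which segments to flush; useg is always
 *		flushed, kernel segments (including kseg0 when KMF_GPA is set)
 *		only when KMF_KERN is set.
 */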
void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags)
{
	if (flags & KMF_GPA) {
		/* all of guest virtual address space could be affected */
		if (flags & KMF_KERN)
			/* useg, kseg0, seg2/3 */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff);
		else
			/* useg */
			kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);
	} else {
		/* useg */
		kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff);

		/* kseg2/3 */
		if (flags & KMF_KERN)
			kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff);
	}
}

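/**
 * kvm_mips_handle_kseg0_tlb_fault() - Map a guest kseg0 page into the GVA
 * page tables.
 * @badvaddr:	Faulting guest virtual address in kseg0.
 * @vcpu:	VCPU pointer.
 *
 * Resolve the guest physical pages backing the even/odd page pair containing
 * @badvaddr and write the corresponding host PFNs, marked young and dirty,
 * into the guest kernel GVA page table.
 *
 * Returns:	0 on success, -1 on failure.
 */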
/* XXXKYMA: Must be called with interrupts disabled */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	struct kvm *kvm = vcpu->kvm;
	pte_t *ptep_gva;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	/* Find host PFNs */

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

	/* Find GVA page table entry */

	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, vaddr);
	if (!ptep_gva) {
		kvm_err("No ptep for gva %lx\n", vaddr);
		return -1;
	}

	/* Write host PFNs into GVA page table */
	ptep_gva[0] = pte_mkyoung(pte_mkdirty(pfn_pte(pfn0, PAGE_SHARED)));
	ptep_gva[1] = pte_mkyoung(pte_mkdirty(pfn_pte(pfn1, PAGE_SHARED)));

	/* Invalidate this entry in the TLB, guest kernel ASID only */
	kvm_mips_host_tlb_inv(vcpu, vaddr, false, true);
	return 0;
}

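/**
 * kvm_mips_handle_mapped_seg_tlb_fault() - Map a guest TLB mapped page.
 * @vcpu:	VCPU pointer.
 * @tlb:	Guest TLB entry that maps the faulting address.
 * @gva:	Faulting guest virtual address.
 *
 * Resolve the guest physical page that the matching guest TLB entry maps
 * @gva to, and write the host PFN into the GVA page table, with the valid
 * and dirty attributes taken from the guest TLB entry.
 *
 * Returns:	0 on success, -1 on failure.
 */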
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb,
					 unsigned long gva)
{
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn;
	gfn_t gfn;
	long tlb_lo = 0;
	pte_t *ptep_gva;
	unsigned int idx;
	bool kernel = KVM_GUEST_KERNEL_MODE(vcpu);

	/*
	 * The commpage address must not be mapped to anything else if the guest
	 * TLB contains entries nearby, or commpage accesses will break.
	 */
	idx = TLB_LO_IDX(*tlb, gva);
	if ((gva ^ KVM_GUEST_COMMPAGE_ADDR) & VPN2_MASK & PAGE_MASK)
		tlb_lo = tlb->tlb_lo[idx];

	/* Find host PFN */
	gfn = mips3_tlbpfn_to_paddr(tlb_lo) >> PAGE_SHIFT;
	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, EHi: %#lx\n",
			__func__, gfn, tlb->tlb_hi);
		kvm_mips_dump_guest_tlbs(vcpu);
		return -1;
	}
	if (kvm_mips_map_page(kvm, gfn) < 0)
		return -1;
	pfn = kvm->arch.guest_pmap[gfn];

	/* Find GVA page table entry */
	ptep_gva = kvm_trap_emul_pte_for_gva(vcpu, gva);
	if (!ptep_gva) {
		kvm_err("No ptep for gva %lx\n", gva);
		return -1;
	}

	/* Write PFN into GVA page table, taking attributes from Guest TLB */
	*ptep_gva = pfn_pte(pfn, (!(tlb_lo & ENTRYLO_V)) ? __pgprot(0) :
				 (tlb_lo & ENTRYLO_D) ? PAGE_SHARED :
				 PAGE_READONLY);
	if (pte_present(*ptep_gva))
		*ptep_gva = pte_mkyoung(pte_mkdirty(*ptep_gva));

	/* Invalidate this entry in the TLB, current guest mode ASID only */
	kvm_mips_host_tlb_inv(vcpu, gva, !kernel, kernel);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	return 0;
}

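/**
 * kvm_mips_handle_commpage_tlb_fault() - Map the commpage into the guest.
 * @badvaddr:	Faulting guest virtual address within the commpage.
 * @vcpu:	VCPU pointer.
 *
 * Map the host page backing the KVM commpage into the guest kernel GVA page
 * table at @badvaddr, marked young and dirty so the refill handler does not
 * need to fault on it again.
 *
 * Returns:	0 on success, -1 if no PTE could be allocated.
 */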
int kvm_mips_handle_commpage_tlb_fault(unsigned long badvaddr,
				       struct kvm_vcpu *vcpu)
{
	kvm_pfn_t pfn;
	pte_t *ptep;

	ptep = kvm_trap_emul_pte_for_gva(vcpu, badvaddr);
	if (!ptep) {
		kvm_err("No ptep for commpage %lx\n", badvaddr);
		return -1;
	}

	pfn = PFN_DOWN(virt_to_phys(vcpu->arch.kseg0_commpage));
	/* Also set valid and dirty, so refill handler doesn't have to */
	*ptep = pte_mkyoung(pte_mkdirty(pfn_pte(pfn, PAGE_SHARED)));

	/* Invalidate this entry in the TLB, guest kernel ASID only */
	kvm_mips_host_tlb_inv(vcpu, badvaddr, false, true);
	return 0;
}

/**
 * kvm_mips_migrate_count() - Migrate timer.
 * @vcpu:	Virtual CPU.
 *
 * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it
 * if it was running prior to being cancelled.
 *
 * Must be called when the VCPU is migrated to a different CPU to ensure that
 * timer expiry during guest execution interrupts the guest and causes the
 * interrupt to be delivered in a timely manner.
 */
static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu)
{
	if (hrtimer_cancel(&vcpu->arch.comparecount_timer))
		hrtimer_restart(&vcpu->arch.comparecount_timer);
}

/* Restore ASID once we are scheduled back after preemption */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	local_irq_save(flags);

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_load(vcpu, cpu);

	local_irq_restore(flags);
}

/* ASID can change if another task is scheduled during preemption */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_put(vcpu, cpu);

	local_irq_restore(flags);
}

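/*
 * Fetch the guest instruction word at guest virtual address @opc into @out.
 * Returns 0 on success, or -EFAULT if the address could not be read.
 */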
int kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu, u32 *out)
{
	int err;

	err = get_user(*out, opc);
	if (unlikely(err)) {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return -EFAULT;
	}

	return 0;
}