/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "timing.h"
#include "e500_mmu_host.h"

#include "trace_booke.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}
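
/*
 * Worked example for the !usermode path above (assuming the usual Book3E
 * MAS3 layout, where each user permission bit sits one position above its
 * supervisor counterpart): a guest entry granting MAS3_SR|MAS3_SW gains
 * MAS3_UR|MAS3_UW from the << 1, so guest-supervisor accesses still pass
 * the hardware's user permission checks.
 */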

/*
 * writing shadow tlb entry to host TLB
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0,
				     uint32_t lpid)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, MAS8_TGS | get_thread_specific_lpid(lpid));
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;
	u32 mas4;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	mas4 = mfspr(SPRN_MAS4);
	mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	mtspr(SPRN_MAS4, mas4);
	local_irq_restore(flags);

	return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0, vcpu_e500->vcpu.kvm->arch.lpid);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)),
				  vcpu_e500->vcpu.kvm->arch.lpid);
	}
}
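
/*
 * Two placement policies meet above: TLB0 entries go to whichever victim
 * slot the hardware hints at for the entry's EPN (get_host_mas0), while
 * TLB1 slots are allocated by KVM itself and mapped to hardware indices
 * from the top of the array downwards (to_htlb1_esel), staying clear of
 * the host's own tlbcam entries at the low indices.
 */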

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}
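
/*
 * Note: preemption stays disabled from the stid lookup through the
 * hardware write so that both happen on the same physical CPU (on non-HV
 * e500 the shadow ID allocator is, as far as this code assumes, tracked
 * per physical CPU).
 */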

#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index), 0);
	preempt_enable();
}
#endif
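/*
 * Invalidate whatever host TLB entries back a single guest TLB entry.
 * Three cases are handled below: a guest TLB1 entry shadowed by one or
 * more host TLB1 slots (tracked in the g2h bitmap), a guest TLB1 entry
 * that was broken up into 4k TLB0 entries, and a plain TLB0 entry backed
 * by at most one host entry per shadow pid.
 */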
void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
			 int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

	/* Don't bother with unmapped entries */
	if (!(ref->flags & E500_TLB_VALID)) {
		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
		     "%s: flags %x\n", __func__, ref->flags);
		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		while (tmp) {
			/* tmp & -tmp isolates the lowest set bit; turn it
			 * into a host TLB1 slot index and clear that entry. */
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
		local_irq_restore(flags);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
		/*
		 * TLB1 entry is backed by 4k pages.  This should happen
		 * rarely and is not worth optimizing.  Invalidate everything.
		 */
		kvmppc_e500_tlbil_all(vcpu_e500);
		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
	}

	/*
	 * If the TLB entry is still valid then it's a TLB0 entry, and thus
	 * backed by at most one host tlbe per shadow pid.
	 */
	if (ref->flags & E500_TLB_VALID)
		kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

	/* Mark the TLB entry as not backed by the host anymore */
	ref->flags = 0;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn, unsigned int wimg)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	/* Use guest supplied MAS2_G and MAS2_E */
	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;

	/* Mark the page accessed */
	kvm_set_pfn_accessed(pfn);

	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
}
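
/*
 * The flags word set up above does double duty: besides E500_TLB_VALID it
 * carries the guest's MAS2 attribute bits and the host WIMG bits taken
 * from the Linux pte, which kvmppc_e500_setup_stlbe() later extracts via
 * E500_TLB_MAS2_ATTR when composing the shadow MAS2.
 */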

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		/* FIXME: don't log bogus pfn for TLB1 */
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel;
	int i;

	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
	}
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_e500_tlbil_all(vcpu_e500);
	clear_tlb_privs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);
}
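/*
 * Build the shadow (host) TLB entry for one guest TLB entry: resolve the
 * guest frame to a host pfn, pick the largest page size both mappings can
 * support, and fill in *stlbe.  A sketch of the tsize arithmetic below,
 * assuming 4k host pages: Book3E tsize encodes a page size of 1K << tsize,
 * so e.g. tsize 6 (64k) spans 1 << (6 + 10 - PAGE_SHIFT) = 16 host pages.
 */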
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn = 0; /* silence GCC warning */
	unsigned long hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
	int ret = 0;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu_e500->vcpu.kvm;
	unsigned long tsize_pages = 0;
	pte_t *ptep;
	unsigned int wimg = 0;
	pgd_t *pgdir;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn)) {
			if (printk_ratelimit())
				pr_err("%s: real page not found for gfn %lx\n",
				       __func__, (long)gfn);
			return -EINVAL;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;
		goto out;
	}

	pgdir = vcpu_e500->vcpu.arch.pgdir;
	ptep = find_linux_pte_or_hugepte(pgdir, hva, NULL);
	if (ptep) {
		pte_t pte = READ_ONCE(*ptep);

		if (pte_present(pte)) {
			wimg = (pte_val(pte) >> PTE_WIMGE_SHIFT) &
				MAS2_WIMGE_MASK;
		} else {
			pr_err_ratelimited("%s: pte not present: gfn %lx, pfn %lx\n",
					   __func__, (long)gfn, pfn);
			ret = -EINVAL;
			goto out;
		}
	}
	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

out:
	spin_unlock(&kvm->mmu_lock);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);

	return ret;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
				struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;
	int stlbsel = 0;
	int sesel = 0;
	int r;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
				   get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
				   gtlbe, 0, stlbe, ref);
	if (r)
		return r;

	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

	return 0;
}

static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
				     struct tlbe_ref *ref,
				     int esel)
{
	unsigned int sesel = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
	}

	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
	WARN_ON(!(ref->flags & E500_TLB_VALID));

	return sesel;
}
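
/*
 * Illustration of the bookkeeping above (hypothetical indices): if guest
 * TLB1 entry 3 is shadowed by host TLB1 slots 5 and 9, then
 * g2h_tlb1_map[3] has bits 5 and 9 set, while h2g_tlb1_rmap[5] and
 * h2g_tlb1_rmap[9] both hold 4 -- esel + 1, so that zero can mean
 * "slot unused".
 */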

/*
 * Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB.  Handles both one-to-one and one-to-many mappings.
 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
	int sesel;
	int r;

	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
				   ref);
	if (r)
		return r;

	/* Use TLB0 when we can only map a page with 4k */
	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
		return 0;
	}

	/* Otherwise map into TLB1 */
	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

	return 0;
}
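/*
 * Called on a TLB miss that hit in the guest TLB: "index" identifies the
 * matching guest entry, and eaddr/gpaddr are the faulting guest virtual
 * and physical addresses to be shadowed into the host TLB.
 */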
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Triggers after clear_tlb_privs or on initial mapping */
		if (!(priv->ref.flags & E500_TLB_VALID)) {
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		} else {
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
		}
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
				     esel);
		break;
	}

	default:
		BUG();
		break;
	}
}
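/*
 * Fetch the guest instruction at the current PC for emulation.  Rather
 * than walking the guest TLB in software, the HV variant below searches
 * the hardware TLB with tlbsx under the guest's LPID/PID, then reads the
 * instruction through a temporary kernel mapping of the backing page.
 */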
#ifdef CONFIG_KVM_BOOKE_HV
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *instr)
{
	gva_t geaddr;
	hpa_t addr;
	hfn_t pfn;
	hva_t eaddr;
	u32 mas1, mas2, mas3;
	u64 mas7_mas3;
	struct page *page;
	unsigned int addr_space, psize_shift;
	bool pr;
	unsigned long flags;

	/* Search TLB for guest pc to get the real address */
	geaddr = kvmppc_get_pc(vcpu);

	addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
	mtspr(SPRN_MAS5, MAS5_SGS | get_lpid(vcpu));
	asm volatile("tlbsx 0, %[geaddr]\n" : :
		     [geaddr] "r" (geaddr));
	mtspr(SPRN_MAS5, 0);
	mtspr(SPRN_MAS8, 0);
	mas1 = mfspr(SPRN_MAS1);
	mas2 = mfspr(SPRN_MAS2);
	mas3 = mfspr(SPRN_MAS3);
#ifdef CONFIG_64BIT
	mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
#else
	mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3;
#endif
	local_irq_restore(flags);

	/*
	 * If the TLB entry for guest pc was evicted, return to the guest.
	 * Chances are good that a valid entry will be found on the next
	 * attempt.
	 */
	if (!(mas1 & MAS1_VALID))
		return EMULATE_AGAIN;

	/*
	 * Another thread may rewrite the TLB entry in parallel; don't
	 * execute from the address if the execute permission is not set.
	 */
	pr = vcpu->arch.shared->msr & MSR_PR;
	if (unlikely((pr && !(mas3 & MAS3_UX)) ||
		     (!pr && !(mas3 & MAS3_SX)))) {
		pr_err_ratelimited(
			"%s: Instruction emulation from guest address %08lx without execute permission\n",
			__func__, geaddr);
		return EMULATE_AGAIN;
	}

	/*
	 * The real address will be mapped by a cacheable, memory coherent,
	 * write-back page.  Check for mismatches when LRAT is used.
	 */
	if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
	    unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
		pr_err_ratelimited(
			"%s: Instruction emulation from guest address %08lx mismatches storage attributes\n",
			__func__, geaddr);
		return EMULATE_AGAIN;
	}

	/* Get pfn */
	psize_shift = MAS1_GET_TSIZE(mas1) + 10;
	addr = (mas7_mas3 & (~0ULL << psize_shift)) |
	       (geaddr & ((1ULL << psize_shift) - 1ULL));
	pfn = addr >> PAGE_SHIFT;

	/* Guard against emulation from devices area */
	if (unlikely(!page_is_ram(pfn))) {
		pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
				   __func__, addr);
		return EMULATE_AGAIN;
	}

	/* Map a page and get guest's instruction */
	page = pfn_to_page(pfn);
	eaddr = (unsigned long)kmap_atomic(page);
	*instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
	kunmap_atomic((u32 *)eaddr);

	return EMULATE_DONE;
}
#else
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *instr)
{
	return EMULATE_AGAIN;
}
#endif

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere.  This is slow, but
	 * we are 100% sure that we catch the page to be unmapped.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

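/*
 * Read the host TLB geometry from TLB0CFG/TLB1CFG.  As an example of the
 * math below, a TLB0 reporting 512 entries and 4 ways yields 128 sets;
 * TLB1 is fully associative, so ways == entries and sets == 1.
 */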
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		return -EINVAL;

	return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->h2g_tlb1_rmap);
}