/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "timing.h"
#include "e500_mmu_host.h"

#include "trace_booke.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, stlbe->mas8);
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;
	u32 mas4;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	mas4 = mfspr(SPRN_MAS4);
	mtspr(SPRN_MAS4, mas4 & ~MAS4_TLBSEL_MASK);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	mtspr(SPRN_MAS4, mas4);
	local_irq_restore(flags);

	return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
#endif

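/*
 * Invalidate any host TLB entries that shadow the given guest TLB entry,
 * whether it is backed by a bitmap of host TLB1 entries, demoted to host
 * TLB0 pages, or a single host TLB0 entry, then clear the ref flags so
 * the entry is no longer considered host-backed.
 */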
void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
			 int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

	/* Don't bother with unmapped entries */
	if (!(ref->flags & E500_TLB_VALID)) {
		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
		     "%s: flags %x\n", __func__, ref->flags);
		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		while (tmp) {
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
		local_irq_restore(flags);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
		/*
		 * TLB1 entry is backed by 4k pages. This should happen
		 * rarely and is not worth optimizing. Invalidate everything.
		 */
		kvmppc_e500_tlbil_all(vcpu_e500);
		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
	}

	/*
	 * If TLB entry is still valid then it's a TLB0 entry, and thus
	 * backed by at most one host tlbe per shadow pid
	 */
	if (ref->flags & E500_TLB_VALID)
		kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

	/* Mark the TLB as not backed by the host anymore */
	ref->flags = 0;
}

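/* A guest mapping is writable if either supervisor or user write is set. */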
static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

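/*
 * Record the host pfn backing a guest TLB entry along with the WIMGE
 * attributes taken from the host Linux pte, and mark the backing page
 * accessed (and dirty, if the guest mapping is writable).
 */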
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn, unsigned int wimg)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	/* Use guest supplied MAS2_G and MAS2_E */
	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;

	/* Mark the page accessed */
	kvm_set_pfn_accessed(pfn);

	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
}

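/* Forget a previously recorded host mapping for a guest TLB entry. */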
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		/* FIXME: don't log bogus pfn for TLB1 */
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}

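/* Drop all guest-to-host and host-to-guest TLB1 mapping bookkeeping. */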
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel;
	int i;

	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
	}
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_e500_tlbil_all(vcpu_e500);
	clear_tlb_privs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}

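/*
 * Translate a guest TLB entry into a host mapping: find the backing pfn
 * (honoring VM_PFNMAP and hugetlb regions for TLB1 so larger page sizes
 * can be used), pick the largest page size that both guest and host
 * alignment allow, and fill in the shadow TLB entry.  Returns -EAGAIN
 * if an mmu notifier invalidation raced with us and the caller should
 * retry.
 */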
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn = 0; /* silence GCC warning */
	unsigned long hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
	int ret = 0;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu_e500->vcpu.kvm;
	unsigned long tsize_pages = 0;
	pte_t *ptep;
	unsigned int wimg = 0;
	pgd_t *pgdir;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn)) {
			if (printk_ratelimit())
				pr_err("%s: real page not found for gfn %lx\n",
				       __func__, (long)gfn);
			return -EINVAL;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;
		goto out;
	}

	pgdir = vcpu_e500->vcpu.arch.pgdir;
	ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
	if (pte_present(*ptep))
		wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
	else {
		if (printk_ratelimit())
			pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
			       __func__, (long)gfn, pfn);
		/* Must drop mmu_lock and the page reference before returning. */
		ret = -EINVAL;
		goto out;
	}
	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

out:
	spin_unlock(&kvm->mmu_lock);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);

	return ret;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
				struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;
	int stlbsel = 0;
	int sesel = 0;
	int r;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, stlbe, ref);
	if (r)
		return r;

	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

	return 0;
}

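/*
 * Pick the next victim slot in the shadow TLB1 (round robin), detach any
 * guest entry previously mapped there, and wire up the guest-to-host
 * bitmap and host-to-guest reverse map for the new entry.  Returns the
 * chosen host esel.
 */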
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
				     struct tlbe_ref *ref,
				     int esel)
{
	unsigned int sesel = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
	}

	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
	WARN_ON(!(ref->flags & E500_TLB_VALID));

	return sesel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* For both one-one and one-to-many */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
	int sesel;
	int r;

	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
				   ref);
	if (r)
		return r;

	/* Use TLB0 when we can only map a page with 4k */
	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
		return 0;
	}

	/* Otherwise map into TLB1 */
	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

	return 0;
}

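/*
 * Create (or re-create) the host shadow mapping for a guest TLB entry
 * that already translates eaddr/gpaddr, dispatching to the TLB0 or TLB1
 * mapping path according to which guest TLB the entry lives in.
 */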
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Triggers after clear_tlb_privs or on initial mapping */
		if (!(priv->ref.flags & E500_TLB_VALID)) {
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		} else {
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
		}
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
				     esel);
		break;
	}

	default:
		BUG();
		break;
	}
}

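/*
 * Fetch the last instruction the guest executed: search the hardware TLB
 * for the guest PC's translation, sanity check execute permission and
 * storage attributes, then read the instruction through a temporary
 * kernel mapping of the backing page.
 */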
#ifdef CONFIG_KVM_BOOKE_HV
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *instr)
{
	gva_t geaddr;
	hpa_t addr;
	hfn_t pfn;
	hva_t eaddr;
	u32 mas1, mas2, mas3;
	u64 mas7_mas3;
	struct page *page;
	unsigned int addr_space, psize_shift;
	bool pr;
	unsigned long flags;

	/* Search TLB for guest pc to get the real address */
	geaddr = kvmppc_get_pc(vcpu);

	addr_space = (vcpu->arch.shared->msr & MSR_IS) >> MSR_IR_LG;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, (vcpu->arch.pid << MAS6_SPID_SHIFT) | addr_space);
	mtspr(SPRN_MAS5, MAS5_SGS | vcpu->kvm->arch.lpid);
	asm volatile("tlbsx 0, %[geaddr]\n" : :
		     [geaddr] "r" (geaddr));
	mtspr(SPRN_MAS5, 0);
	mtspr(SPRN_MAS8, 0);
	mas1 = mfspr(SPRN_MAS1);
	mas2 = mfspr(SPRN_MAS2);
	mas3 = mfspr(SPRN_MAS3);
#ifdef CONFIG_64BIT
	mas7_mas3 = mfspr(SPRN_MAS7_MAS3);
#else
	mas7_mas3 = ((u64)mfspr(SPRN_MAS7) << 32) | mas3;
#endif
	local_irq_restore(flags);

	/*
	 * If the TLB entry for guest pc was evicted, return to the guest.
	 * A valid TLB entry is likely to be found on the next attempt.
	 */
	if (!(mas1 & MAS1_VALID))
		return EMULATE_AGAIN;

	/*
	 * Another thread may rewrite the TLB entry in parallel, don't
	 * execute from the address if the execute permission is not set
	 */
	pr = vcpu->arch.shared->msr & MSR_PR;
	if (unlikely((pr && !(mas3 & MAS3_UX)) ||
		     (!pr && !(mas3 & MAS3_SX)))) {
		pr_err_ratelimited(
665 "%s: Instuction emulation from guest addres %08lx without execute permission\n",
			__func__, geaddr);
		return EMULATE_AGAIN;
	}

	/*
	 * The real address will be mapped by a cacheable, memory coherent,
	 * write-back page.  Check for mismatches when LRAT is used.
	 */
	if (has_feature(vcpu, VCPU_FTR_MMU_V2) &&
	    unlikely((mas2 & MAS2_I) || (mas2 & MAS2_W) || !(mas2 & MAS2_M))) {
		pr_err_ratelimited(
677 "%s: Instuction emulation from guest addres %08lx mismatches storage attributes\n",
			__func__, geaddr);
		return EMULATE_AGAIN;
	}

	/* Get pfn */
	psize_shift = MAS1_GET_TSIZE(mas1) + 10;
	addr = (mas7_mas3 & (~0ULL << psize_shift)) |
	       (geaddr & ((1ULL << psize_shift) - 1ULL));
	pfn = addr >> PAGE_SHIFT;

	/* Guard against emulation from devices area */
	if (unlikely(!page_is_ram(pfn))) {
		pr_err_ratelimited("%s: Instruction emulation from non-RAM host address %08llx is not supported\n",
				   __func__, addr);
		return EMULATE_AGAIN;
	}

	/* Map a page and get guest's instruction */
	page = pfn_to_page(pfn);
	eaddr = (unsigned long)kmap_atomic(page);
	*instr = *(u32 *)(eaddr | (unsigned long)(addr & ~PAGE_MASK));
	kunmap_atomic((u32 *)eaddr);

	return EMULATE_DONE;
}
#else
int kvmppc_load_last_inst(struct kvm_vcpu *vcpu, enum instruction_type type,
			  u32 *instr)
{
	return EMULATE_AGAIN;
}
#endif

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere.  This is slow, but
	 * it guarantees we catch the page that is to be unmapped.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

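/*
 * Read the host TLB geometry from TLB0CFG/TLB1CFG, validate it, and
 * allocate the host-to-guest reverse map used for shadow TLB1 entries.
 */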
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		return -EINVAL;

	return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->h2g_tlb1_rmap);
}