/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "timing.h"
#include "e500_mmu_host.h"

#include "trace_booke.h"

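/*
 * Shadow TLB1 slots are handed out from the top of the host TLB1 array
 * downwards: shadow index 0 corresponds to the highest hardware entry.
 * The low entries (up to tlbcam_index) stay reserved for the host's own
 * mappings and the magic page.
 */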
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

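/*
 * Derive the MAS3 permission bits for a shadow entry from the guest's
 * MAS3 value.  Without hardware guest support, the guest kernel runs in
 * host user mode, so guest-supervisor permissions are copied into the
 * user permission bits; the supervisor bits are then set unconditionally
 * so the host kernel can always access the page.
 */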
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, stlbe->mas8);
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	local_irq_restore(flags);

	return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

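	/*
	 * On e500v2 the shadow TID is derived from per-CPU shadow-ID
	 * state, so it can go stale across a migration; keep preemption
	 * off from the lookup until the entry has been written.
	 */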
	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
#endif

void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
			 int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

	/* Don't bother with unmapped entries */
	if (!(ref->flags & E500_TLB_VALID)) {
		WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
		     "%s: flags %x\n", __func__, ref->flags);
		WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
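		/*
		 * Walk the set bits of the g2h bitmap: tmp & -tmp
		 * isolates the lowest set bit, __ilog2_u64() gives its
		 * index (one host TLB1 slot backing this guest entry),
		 * and tmp &= tmp - 1 clears it for the next iteration.
		 */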
		while (tmp) {
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
		local_irq_restore(flags);
	}

	if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
		/*
		 * TLB1 entry is backed by 4k pages. This should happen
		 * rarely and is not worth optimizing. Invalidate everything.
		 */
		kvmppc_e500_tlbil_all(vcpu_e500);
		ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
	}

	/*
	 * If TLB entry is still valid then it's a TLB0 entry, and thus
	 * backed by at most one host tlbe per shadow pid
	 */
	if (ref->flags & E500_TLB_VALID)
		kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

	/* Mark the TLB as not backed by the host anymore */
	ref->flags = 0;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

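/*
 * ref->flags holds both the E500_TLB_* state flags and the guest's MAS2
 * attribute bits (plus the WIMG bits taken from the host pte); the two
 * groups occupy disjoint bit positions, and the attribute bits are
 * recovered later via E500_TLB_MAS2_ATTR in kvmppc_e500_setup_stlbe().
 */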
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn, unsigned int wimg)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	/* Use guest supplied MAS2_G and MAS2_E */
	ref->flags |= (gtlbe->mas2 & MAS2_ATTRIB_MASK) | wimg;

	/* Mark the page accessed */
	kvm_set_pfn_accessed(pfn);

	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		/* FIXME: don't log bogus pfn for TLB1 */
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}

static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel;
	int i;

	for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
		for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
			struct tlbe_ref *ref =
				&vcpu_e500->gtlb_priv[tlbsel][i].ref;
			kvmppc_e500_ref_release(ref);
		}
	}
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	kvmppc_e500_tlbil_all(vcpu_e500);
	clear_tlb_privs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) | (ref->flags & E500_TLB_MAS2_ATTR);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}

static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn = 0; /* silence GCC warning */
	unsigned long hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;
	int ret = 0;
	unsigned long mmu_seq;
	struct kvm *kvm = vcpu_e500->vcpu.kvm;
	unsigned long tsize_pages = 0;
	pte_t *ptep;
	unsigned int wimg = 0;
	pgd_t *pgdir;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

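	/*
	 * The sequence number is re-checked under kvm->mmu_lock before
	 * the shadow entry is installed (mmu_notifier_retry() below); if
	 * an invalidation ran in between, we return -EAGAIN and let the
	 * guest take the fault again.
	 */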
	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

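			/*
			 * tsize steps down by 2 because only power-of-4
			 * sizes are implemented; a mapping of size tsize
			 * covers 2^(tsize + 10) bytes, i.e.
			 * 1 << (tsize - 2) 4k pages (the "tsize - 2" form
			 * assumes a 4k host PAGE_SIZE).
			 */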
			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
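		/*
		 * General form of the page-count calculation above: a
		 * tsize mapping spans 2^(tsize + 10) bytes, i.e.
		 * 1 << (tsize + 10 - PAGE_SHIFT) host pages.
		 */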
		tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn)) {
			if (printk_ratelimit())
				pr_err("%s: real page not found for gfn %lx\n",
				       __func__, (long)gfn);
			return -EINVAL;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	spin_lock(&kvm->mmu_lock);
	if (mmu_notifier_retry(kvm, mmu_seq)) {
		ret = -EAGAIN;
		goto out;
	}

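	/*
	 * Pull the WIMG attributes from the host Linux pte so the shadow
	 * mapping inherits the host's cacheability for this page.
	 */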
	pgdir = vcpu_e500->vcpu.arch.pgdir;
	ptep = lookup_linux_ptep(pgdir, hva, &tsize_pages);
	if (pte_present(*ptep)) {
		wimg = (*ptep >> PTE_WIMGE_SHIFT) & MAS2_WIMGE_MASK;
	} else {
		if (printk_ratelimit())
			pr_err("%s: pte not present: gfn %lx, pfn %lx\n",
			       __func__, (long)gfn, pfn);
		ret = -EINVAL;
		goto out;
	}
	kvmppc_e500_ref_setup(ref, gtlbe, pfn, wimg);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

out:
	spin_unlock(&kvm->mmu_lock);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);

	return ret;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
				struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;
	int stlbsel = 0;
	int sesel = 0;
	int r;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
				   get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
				   gtlbe, 0, stlbe, ref);
	if (r)
		return r;

	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

	return 0;
}

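/*
 * Bookkeeping for TLB1 mappings: g2h_tlb1_map[esel] is a bitmask of the
 * host TLB1 slots currently backing guest entry esel, and
 * h2g_tlb1_rmap[sesel] records the owning guest entry as esel + 1
 * (0 means the host slot is unused).
 */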
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
				     struct tlbe_ref *ref,
				     int esel)
{
	unsigned int sesel = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
	}

	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
	WARN_ON(!(ref->flags & E500_TLB_VALID));

	return sesel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* For both one-one and one-to-many */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
	int sesel;
	int r;

	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
				   ref);
	if (r)
		return r;

	/* Use TLB0 when we can only map a page with 4k */
	if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
		vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
		write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
		return 0;
	}

	/* Otherwise map into TLB1 */
	sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
	write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

	return 0;
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Triggers after clear_tlb_privs or on initial mapping */
		if (!(priv->ref.flags & E500_TLB_VALID)) {
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		} else {
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
		}
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
				     esel);
		break;
	}

	default:
		BUG();
		break;
	}
}

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the page to be unmapped.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

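	/*
	 * TLB0 associativity comes from TLB0CFG; TLB1 is fully
	 * associative, so it is treated as a single set whose way count
	 * equals its entry count.
	 */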
	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		return -EINVAL;

	return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->h2g_tlb1_rmap);
}