/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "timing.h"
#include "e500_mmu_host.h"

#include "trace_booke.h"

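/*
 * Shadow TLB1 entries are allocated from the top of the host TLB1 array
 * downwards, so shadow esel 0 corresponds to the highest host entry.
 */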
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

static inline unsigned int tlb1_max_shadow_size(void)
{
        /* reserve one entry for magic page */
        return host_tlb_params[1].entries - tlbcam_index - 1;
}

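/*
 * Compute the MAS3 attribute bits for a shadow (host) TLB entry.  In the
 * MAS3 layout each user permission bit sits one position above its
 * supervisor counterpart (e.g. MAS3_SR << 1 == MAS3_UR), which is why the
 * single left shift below suffices to mirror supervisor permissions into
 * the user bits.
 */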
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
        /* Mask off reserved bits. */
        mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
        if (!usermode) {
                /*
                 * Guest is in supervisor mode, so we need to translate guest
                 * supervisor permissions into user permissions.
                 */
                mas3 &= ~E500_TLB_USER_PERM_MASK;
                mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
        }
        mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
        return mas3;
}

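/*
 * Compute the MAS2 attribute bits for a shadow TLB entry.  On SMP hosts
 * MAS2_M (memory coherence required) is forced on, presumably so guest
 * mappings stay coherent when a vcpu migrates between CPUs.
 */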
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
        return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
        return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
                                     uint32_t mas0)
{
        unsigned long flags;

        local_irq_save(flags);
        mtspr(SPRN_MAS0, mas0);
        mtspr(SPRN_MAS1, stlbe->mas1);
        mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
        mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
        mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_MAS8, stlbe->mas8);
#endif
        asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
        /* Must clear mas8 for other host tlbwe's */
        mtspr(SPRN_MAS8, 0);
        isync();
#endif
        local_irq_restore(flags);

        trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
                                      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
        unsigned long flags;
        u32 mas0;

        local_irq_save(flags);
        mtspr(SPRN_MAS6, 0);
        asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
        mas0 = mfspr(SPRN_MAS0);
        local_irq_restore(flags);

        return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
        u32 mas0;

        if (tlbsel == 0) {
                mas0 = get_host_mas0(stlbe->mas2);
                __write_host_tlbe(stlbe, mas0);
        } else {
                __write_host_tlbe(stlbe,
                                  MAS0_TLBSEL(1) |
                                  MAS0_ESEL(to_htlb1_esel(sesel)));
        }
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
                        struct kvm_book3e_206_tlb_entry *gtlbe,
                        struct kvm_book3e_206_tlb_entry *stlbe,
                        int stlbsel, int sesel)
{
        int stid;

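        /*
         * Disable preemption so that the shadow ID we look up stays valid
         * on this CPU until the entry has been written into the host TLB.
         */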
        preempt_disable();
        stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

        stlbe->mas1 |= MAS1_TID(stid);
        write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
        preempt_enable();
}

#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
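/*
 * Map the guest's magic (paravirt shared) page using the host TLB1 slot
 * that tlb1_max_shadow_size() keeps reserved for it.
 */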
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct kvm_book3e_206_tlb_entry magic;
        ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
        unsigned int stid;
        pfn_t pfn;

        pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
        get_page(pfn_to_page(pfn));

        preempt_disable();
        stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

        magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
                     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
        magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
        magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
        magic.mas8 = 0;

        __write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
        preempt_enable();
}
#endif

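/*
 * Invalidate any host TLB presence of the guest entry at (tlbsel, esel):
 * shadow TLB1 slots tracked in the g2h bitmap, TLB0-backed mappings of a
 * guest TLB1 entry, and the plain one-to-one case.
 */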
void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
                         int esel)
{
        struct kvm_book3e_206_tlb_entry *gtlbe =
                get_entry(vcpu_e500, tlbsel, esel);
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

        /* Don't bother with unmapped entries */
        if (!(ref->flags & E500_TLB_VALID)) {
                WARN(ref->flags & (E500_TLB_BITMAP | E500_TLB_TLB0),
                     "%s: flags %x\n", __func__, ref->flags);
                WARN_ON(tlbsel == 1 && vcpu_e500->g2h_tlb1_map[esel]);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
                u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
                int hw_tlb_indx;
                unsigned long flags;

                local_irq_save(flags);
                while (tmp) {
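                        /*
                         * tmp & -tmp isolates the lowest set bit and
                         * __ilog2_u64() turns it into the shadow TLB1 slot
                         * to invalidate; tmp &= tmp - 1 below clears it.
                         */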
                        hw_tlb_indx = __ilog2_u64(tmp & -tmp);
                        mtspr(SPRN_MAS0,
                              MAS0_TLBSEL(1) |
                              MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
                        mtspr(SPRN_MAS1, 0);
                        asm volatile("tlbwe");
                        vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
                        tmp &= tmp - 1;
                }
                mb();
                vcpu_e500->g2h_tlb1_map[esel] = 0;
                ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
                local_irq_restore(flags);
        }

        if (tlbsel == 1 && ref->flags & E500_TLB_TLB0) {
                /*
                 * TLB1 entry is backed by 4k pages. This should happen
                 * rarely and is not worth optimizing. Invalidate everything.
                 */
                kvmppc_e500_tlbil_all(vcpu_e500);
                ref->flags &= ~(E500_TLB_TLB0 | E500_TLB_VALID);
        }

        /* Already invalidated in between */
        if (!(ref->flags & E500_TLB_VALID))
                return;

        /* Guest tlbe is backed by at most one host tlbe per shadow pid. */
        kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

        /* Mark the TLB as not backed by the host anymore */
        ref->flags &= ~E500_TLB_VALID;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
        return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
                                         struct kvm_book3e_206_tlb_entry *gtlbe,
                                         pfn_t pfn)
{
        ref->pfn = pfn;
        ref->flags |= E500_TLB_VALID;

        /* Mark the page accessed */
        kvm_set_pfn_accessed(pfn);

        if (tlbe_is_writable(gtlbe))
                kvm_set_pfn_dirty(pfn);
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
        if (ref->flags & E500_TLB_VALID) {
                /* FIXME: don't log bogus pfn for TLB1 */
                trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
                ref->flags = 0;
        }
}

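/* Forget all guest-to-host and host-to-guest TLB1 mapping state for this vcpu. */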
static void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        if (vcpu_e500->g2h_tlb1_map)
                memset(vcpu_e500->g2h_tlb1_map, 0,
                       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
        if (vcpu_e500->h2g_tlb1_rmap)
                memset(vcpu_e500->h2g_tlb1_rmap, 0,
                       sizeof(unsigned int) * host_tlb_params[1].entries);
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        int tlbsel;
        int i;

        for (tlbsel = 0; tlbsel <= 1; tlbsel++) {
                for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
                        struct tlbe_ref *ref =
                                &vcpu_e500->gtlb_priv[tlbsel][i].ref;
                        kvmppc_e500_ref_release(ref);
                }
        }
}

void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        kvmppc_e500_tlbil_all(vcpu_e500);
        clear_tlb_privs(vcpu_e500);
        clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
        struct kvm_vcpu *vcpu,
        struct kvm_book3e_206_tlb_entry *gtlbe,
        int tsize, struct tlbe_ref *ref, u64 gvaddr,
        struct kvm_book3e_206_tlb_entry *stlbe)
{
        pfn_t pfn = ref->pfn;
        u32 pr = vcpu->arch.shared->msr & MSR_PR;

        BUG_ON(!(ref->flags & E500_TLB_VALID));

        /* Force IPROT=0 for all guest mappings. */
        stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
        stlbe->mas2 = (gvaddr & MAS2_EPN) |
                      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
        stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
                        e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
        stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}

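/*
 * Build a shadow TLB entry for a guest mapping: find the host pfn backing
 * gfn, pick the largest page size the backing allows (TLB1 only), and fill
 * in *stlbe.  Returns 0 on success, -EINVAL if gfn has no backing page.
 */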
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
        u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
        int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
        struct tlbe_ref *ref)
{
        struct kvm_memory_slot *slot;
        unsigned long pfn = 0; /* silence GCC warning */
        unsigned long hva;
        int pfnmap = 0;
        int tsize = BOOK3E_PAGESZ_4K;

        /*
         * Translate guest physical to true physical, acquiring
         * a page reference if it is normal, non-reserved memory.
         *
         * gfn_to_memslot() must succeed because otherwise we wouldn't
         * have gotten this far.  Eventually we should just pass the slot
         * pointer through from the first lookup.
         */
        slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
        hva = gfn_to_hva_memslot(slot, gfn);

        if (tlbsel == 1) {
                struct vm_area_struct *vma;
                down_read(&current->mm->mmap_sem);

                vma = find_vma(current->mm, hva);
                if (vma && hva >= vma->vm_start &&
                    (vma->vm_flags & VM_PFNMAP)) {
                        /*
                         * This VMA is a physically contiguous region (e.g.
                         * /dev/mem) that bypasses normal Linux page
                         * management.  Find the overlap between the
                         * vma and the memslot.
                         */

                        unsigned long start, end;
                        unsigned long slot_start, slot_end;

                        pfnmap = 1;

                        start = vma->vm_pgoff;
                        end = start +
                              ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

                        pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

                        slot_start = pfn - (gfn - slot->base_gfn);
                        slot_end = slot_start + slot->npages;

                        if (start < slot_start)
                                start = slot_start;
                        if (end > slot_end)
                                end = slot_end;

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

                        /*
                         * Now find the largest tsize (up to what the guest
                         * requested) that will cover gfn, stay within the
                         * range, and for which gfn and pfn are mutually
                         * aligned.
                         */

                        for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
                                unsigned long gfn_start, gfn_end, tsize_pages;
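                                /*
                                 * tsize encodes log2 of the mapping size in
                                 * KB; 1 << (tsize - 2) is the page count,
                                 * matching 1 << (tsize + 10 - PAGE_SHIFT)
                                 * with 4K host pages (PAGE_SHIFT == 12).
                                 */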
                                tsize_pages = 1 << (tsize - 2);

                                gfn_start = gfn & ~(tsize_pages - 1);
                                gfn_end = gfn_start + tsize_pages;

                                if (gfn_start + pfn - gfn < start)
                                        continue;
                                if (gfn_end + pfn - gfn > end)
                                        continue;
                                if ((gfn & (tsize_pages - 1)) !=
                                    (pfn & (tsize_pages - 1)))
                                        continue;

                                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
                                pfn &= ~(tsize_pages - 1);
                                break;
                        }
                } else if (vma && hva >= vma->vm_start &&
                           (vma->vm_flags & VM_HUGETLB)) {
                        unsigned long psize = vma_kernel_pagesize(vma);

                        tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
                                MAS1_TSIZE_SHIFT;

                        /*
                         * Take the largest page size that satisfies both host
                         * and guest mapping
                         */
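                        /* __ilog2(psize) - 10 converts the host page size in
                         * bytes to the log2-KB tsize encoding. */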
                        tsize = min(__ilog2(psize) - 10, tsize);

                        /*
                         * e500 doesn't implement the lowest tsize bit,
                         * or 1K pages.
                         */
                        tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
                }

                up_read(&current->mm->mmap_sem);
        }

        if (likely(!pfnmap)) {
                unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
                pfn = gfn_to_pfn_memslot(slot, gfn);
                if (is_error_noslot_pfn(pfn)) {
                        printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
                               (long)gfn);
                        return -EINVAL;
                }

                /* Align guest and physical address to page map boundaries */
                pfn &= ~(tsize_pages - 1);
                gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
        }

        kvmppc_e500_ref_setup(ref, gtlbe, pfn);

        kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
                                ref, gvaddr, stlbe);

        /* Clear i-cache for new pages */
        kvmppc_mmu_flush_icache(pfn);

        /* Drop refcount on page, so that mmu notifiers can clear it */
        kvm_release_pfn_clean(pfn);

        return 0;
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
                                struct kvm_book3e_206_tlb_entry *stlbe)
{
        struct kvm_book3e_206_tlb_entry *gtlbe;
        struct tlbe_ref *ref;
        int stlbsel = 0;
        int sesel = 0;
        int r;

        gtlbe = get_entry(vcpu_e500, 0, esel);
        ref = &vcpu_e500->gtlb_priv[0][esel].ref;

        r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
                                   get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
                                   gtlbe, 0, stlbe, ref);
        if (r)
                return r;

        write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

        return 0;
}

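/*
 * Claim the next shadow TLB1 slot round-robin for guest entry esel, evict
 * whatever guest entry previously mapped through that slot, and update the
 * g2h bitmap and h2g reverse map.  Returns the shadow slot index.
 */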
static int kvmppc_e500_tlb1_map_tlb1(struct kvmppc_vcpu_e500 *vcpu_e500,
                                     struct tlbe_ref *ref,
                                     int esel)
{
        unsigned int sesel = vcpu_e500->host_tlb1_nv++;

        if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
                vcpu_e500->host_tlb1_nv = 0;

        if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
                unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel] - 1;
                vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
        }

        vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
        vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
        vcpu_e500->h2g_tlb1_rmap[sesel] = esel + 1;
        WARN_ON(!(ref->flags & E500_TLB_VALID));

        return sesel;
}

/*
 * Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB.
 */
/* For both one-one and one-to-many */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
                u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
                struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
        struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[1][esel].ref;
        int sesel;
        int r;

        r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
                                   ref);
        if (r)
                return r;

        /* Use TLB0 when we can only map a page with 4k */
        if (get_tlb_tsize(stlbe) == BOOK3E_PAGESZ_4K) {
                vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_TLB0;
                write_stlbe(vcpu_e500, gtlbe, stlbe, 0, 0);
                return 0;
        }

        /* Otherwise map into TLB1 */
        sesel = kvmppc_e500_tlb1_map_tlb1(vcpu_e500, ref, esel);
        write_stlbe(vcpu_e500, gtlbe, stlbe, 1, sesel);

        return 0;
}

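/*
 * Map the guest TLB entry selected by index into the host shadow TLB; the
 * guest TLB already translates eaddr, the host TLB just missed.
 */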
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
                    unsigned int index)
{
        struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
        struct tlbe_priv *priv;
        struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
        int tlbsel = tlbsel_of(index);
        int esel = esel_of(index);

        gtlbe = get_entry(vcpu_e500, tlbsel, esel);

        switch (tlbsel) {
        case 0:
                priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

                /* Triggers after clear_tlb_privs or on initial mapping */
                if (!(priv->ref.flags & E500_TLB_VALID)) {
                        kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
                } else {
                        kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
                                                &priv->ref, eaddr, &stlbe);
                        write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
                }
                break;

        case 1: {
                gfn_t gfn = gpaddr >> PAGE_SHIFT;
                kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
                                     esel);
                break;
        }

        default:
                BUG();
                break;
        }
}

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        /*
         * Flush all shadow tlb entries everywhere.  This is slow, but
         * we are 100% sure that we catch the page that is being unmapped.
         */
        kvm_flush_remote_tlbs(kvm);

        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        /* kvm_unmap_hva flushes everything anyway */
        kvm_unmap_hva(kvm, start);

        return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        kvm_unmap_hva(kvm, hva);
}

/*****************************************/

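/*
 * Probe the host TLB geometry from TLB0CFG/TLB1CFG and allocate the
 * host-to-guest TLB1 reverse map used for shadow TLB1 bookkeeping.
 */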
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
        host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

        /*
         * This should never happen on real e500 hardware, but is
         * architecturally possible -- e.g. in some weird nested
         * virtualization case.
         */
        if (host_tlb_params[0].entries == 0 ||
            host_tlb_params[1].entries == 0) {
                pr_err("%s: need to know host tlb size\n", __func__);
                return -ENODEV;
        }

        host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
                                  TLBnCFG_ASSOC_SHIFT;
        host_tlb_params[1].ways = host_tlb_params[1].entries;

        if (!is_power_of_2(host_tlb_params[0].entries) ||
            !is_power_of_2(host_tlb_params[0].ways) ||
            host_tlb_params[0].entries < host_tlb_params[0].ways ||
            host_tlb_params[0].ways == 0) {
                pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
                       __func__, host_tlb_params[0].entries,
                       host_tlb_params[0].ways);
                return -ENODEV;
        }

        host_tlb_params[0].sets =
                host_tlb_params[0].entries / host_tlb_params[0].ways;
        host_tlb_params[1].sets = 1;

        vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
                                           host_tlb_params[1].entries,
                                           GFP_KERNEL);
        if (!vcpu_e500->h2g_tlb1_rmap)
                return -ENOMEM;

        return 0;
}

void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
        kfree(vcpu_e500->h2g_tlb1_rmap);
}