/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"

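/*
 * Shadow TLB1 entries are installed in the host TLB1 from the top
 * down; the low host entries (below tlbcam_index) are left for the
 * host's own CAM mappings.
 */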
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;

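/* Dump the guest and shadow TLB arrays, for debugging. */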
void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Shadow TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" S[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

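/*
 * Pick the next victim entry in guest TLB0, round-robin.  The same
 * counter is exposed to the guest as the MAS0[NV] hint.
 */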
static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[0]++;
	if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->guest_tlb_nv[0] = 0;

	return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
	return tlb1_entry_num - tlbcam_index;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

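/*
 * Derive the MAS3 permission bits for a shadow (host) entry from the
 * guest's.  The guest always runs in host user mode, so when it is in
 * guest supervisor mode its SR/SW/SX bits must be copied into the
 * corresponding UR/UW/UX positions.
 */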
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow TLB entry into the host TLB.  Interrupts are
 * disabled so that the MAS registers cannot be clobbered between
 * loading them and executing tlbwe.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);
}

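/*
 * Install shadow entry (tlbsel, esel) in the host TLB.  For TLB0 the
 * low bits of esel select the way (the set is implied by the EPN);
 * TLB1 slots are mapped to host entries via to_htlb1_esel().
 */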
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	if (tlbsel == 0) {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(0) |
				  MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(esel)));
	}
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
	_tlbil_all();
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
		struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return i;
	}

	return -1;
}

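/*
 * Drop the host page reference held by a valid shadow entry.  Pages
 * that were mapped writable are released dirty so the host writes
 * them back.
 */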
static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
	unsigned long pfn;

	pfn = stlbe->mas3 >> PAGE_SHIFT;
	pfn |= stlbe->mas7 << (32 - PAGE_SHIFT);

	if (get_tlb_v(stlbe)) {
		if (tlbe_is_writable(stlbe))
			kvm_release_pfn_dirty(pfn);
		else
			kvm_release_pfn_clean(pfn);
	}
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
	stlbe->mas1 = 0;
	trace_kvm_stlb_inval(index_of(tlbsel, esel));
}

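/*
 * Invalidate every shadow TLB1 entry that overlaps the guest address
 * range [eaddr, eend] and matches tid (a TID of zero matches any
 * PID).  Each cleared slot is written back to the host TLB so the
 * stale translation disappears immediately.
 */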
static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, gva_t eend, u32 tid)
{
	unsigned int pid = tid & 0xff;
	unsigned int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->guest_tlb_size[1]; i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
		write_host_tlbe(vcpu_e500, 1, i);
	}
}

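/*
 * Load the MAS registers with the values the guest expects to find
 * after a TLB miss, as the hardware would: MAS4 supplies the default
 * TLBSEL, PIDSEL and TSIZE, and the victim slot comes from the
 * round-robin counter.
 */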
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* Since we only have two TLBs, only the lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}

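/*
 * Map a guest TLB entry into the shadow TLB: translate guest physical
 * to host physical, take a reference on the backing page, and build
 * the shadow MAS values.  For TLB1, physically contiguous VM_PFNMAP
 * regions (e.g. /dev/mem) may be mapped with the largest page size
 * that keeps gfn and pfn mutually aligned.
 */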
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
{
	struct kvm_memory_slot *slot;
	struct tlbe *stlbe;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;

	stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			kvm_release_pfn_clean(pfn);
			return;
		}
	}

	/* Drop reference to old page. */
	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);

	/* Force TS=1 IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize)
		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;

	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}

/* XXX Only map the one-to-one case, for now use TLB0. */
static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, tlbsel, esel);

	return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX For both one-to-one and one-to-many; for now use TLB1. */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[1]++;

	if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->guest_tlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);

	return victim;
}

/* Invalidate all guest kernel mappings when entering usermode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	if (usermode) {
		struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
		int i;

		/* XXX Replace loop with fancy data structures. */
		for (i = 0; i < tlb1_max_shadow_size(); i++)
			kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);

		_tlbil_all();
	}
}

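/*
 * Invalidate a guest TLB entry and its shadow mappings, unless the
 * entry is protected by IPROT.  Returns 0 on success, -1 if the
 * entry was IPROT-protected.
 */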
static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1) {
		kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
				get_tlb_end(gtlbe),
				get_tlb_tid(gtlbe));
	} else {
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	gtlbe->mas1 = 0;

	return 0;
}

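/*
 * Emulate a guest write to MMUCSR0: the TLB0FI/TLB1FI bits flash-
 * invalidate all non-IPROT entries of the corresponding guest TLB.
 */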
int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	_tlbil_all();

	return EMULATE_DONE;
}

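/*
 * Emulate the tlbivax instruction.  Bit 3 of the effective address
 * selects the TLB and bit 2 requests invalidate-all; otherwise only
 * the entry matching the EA and the current PID is invalidated.
 */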
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* Since we only have two TLBs, only the lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	_tlbil_all();

	return EMULATE_DONE;
}

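/*
 * Emulate the tlbre instruction: read the guest TLB entry selected by
 * MAS0[TLBSEL]/MAS0[ESEL] back into the MAS registers.
 */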
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

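/*
 * Emulate the tlbsx instruction: search both guest TLBs for an entry
 * matching the effective address and the MAS6 search PID/AS.  On a
 * hit the MAS registers describe the entry found; on a miss they are
 * primed for a subsequent tlbwe, as on hardware.
 */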
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* Since we only have two TLBs, only the lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			/* TS comes from the searched address space (SAS). */
			| ((vcpu_e500->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

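/*
 * Emulate the tlbwe instruction: copy the MAS registers into the
 * selected guest TLB entry, invalidate shadow mappings of the old
 * entry, and, if the new entry is host-safe, install it in the
 * shadow and host TLBs right away.
 */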
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	u64 eaddr;
	u64 raddr;
	u32 tid;
	struct tlbe *gtlbe;
	int tlbsel, esel, stlbsel, sesel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (get_tlb_v(gtlbe) && tlbsel == 1) {
		eaddr = get_tlb_eaddr(gtlbe);
		tid = get_tlb_tid(gtlbe);
		kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
				get_tlb_end(gtlbe), tid);
	}

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe);
			break;

		default:
			BUG();
		}
		write_host_tlbe(vcpu_e500, stlbsel, sesel);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

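/*
 * Translate a guest effective address to a guest physical address
 * through the guest TLB entry identified by 'index' (as returned by
 * kvmppc_e500_tlb_search()).
 */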
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, i;

	for (tlbsel = 0; tlbsel < 2; tlbsel++)
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++)
			kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);

	/* Discard all guest mappings. */
	_tlbil_all();
}

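/*
 * Map the guest translation identified by 'index' into the host.
 * TLB0 entries reuse the guest slot directly; TLB1 entries go through
 * the round-robin shadow TLB1 allocator.
 */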
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		struct tlbe *gtlbe
			= &vcpu_e500->guest_tlb[tlbsel][esel];

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
		break;
	}

	default:
		BUG();
		break;
	}
	write_host_tlbe(vcpu_e500, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
		gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
		vcpu->arch.pid = pid;
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->guest_tlb[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->guest_tlb[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

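/*
 * Allocate the guest and shadow TLB arrays, and derive the TLB
 * configuration registers seen by the guest from the host's
 * TLB0CFG/TLB1CFG with the entry counts replaced by the emulated
 * sizes.
 */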
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->guest_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[0] == NULL)
		goto err_out;

	vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->shadow_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[0] == NULL)
		goto err_out_guest0;

	vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->guest_tlb[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[1] == NULL)
		goto err_out_shadow0;

	vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
	vcpu_e500->shadow_tlb[1] =
		kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[1] == NULL)
		goto err_out_guest1;

	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];

	return 0;

err_out_guest1:
	kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
	kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
	kfree(vcpu_e500->guest_tlb[0]);
err_out:
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->shadow_tlb[1]);
	kfree(vcpu_e500->guest_tlb[1]);
	kfree(vcpu_e500->shadow_tlb[0]);
	kfree(vcpu_e500->guest_tlb[0]);
}