/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"

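/*
 * Host TLB1 entries are handed out to the guest from the top of the
 * array downwards; to_htlb1_esel() converts a shadow TLB1 index into
 * the corresponding host entry select.
 */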
#define to_htlb1_esel(esel) (tlb1_entry_num - (esel) - 1)

static unsigned int tlb1_entry_num;

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk(KERN_INFO "| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk(KERN_INFO "Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(KERN_INFO " G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk(KERN_INFO "Shadow TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->shadow_tlb[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(KERN_INFO " S[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

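/*
 * Pick the next TLB0 entry to evict: a simple round-robin sweep of
 * the "next victim" counter across the ways of a set.
 */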
static inline unsigned int tlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[0]++;
	if (unlikely(vcpu_e500->guest_tlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->guest_tlb_nv[0] = 0;

	return victim;
}

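/*
 * Host TLB1 entries below tlbcam_index are pinned for the host
 * kernel's own CAM mappings; only the remainder can hold guest
 * shadow entries.
 */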
static inline unsigned int tlb1_max_shadow_size(void)
{
	return tlb1_entry_num - tlbcam_index;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}

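/*
 * On SMP hosts the vcpu may migrate between CPUs, so force the M
 * (memory coherence) bit on every shadow mapping.
 */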
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);
}

static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	if (tlbsel == 0) {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(0) |
				  MAS0_ESEL(esel & (KVM_E500_TLB0_WAY_NUM - 1)));
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(esel)));
	}
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
	_tlbil_all();
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < vcpu_e500->guest_tlb_size[tlbsel]; i++) {
		struct tlbe *tlbe = &vcpu_e500->guest_tlb[tlbsel][i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return i;
	}

	return -1;
}

static void kvmppc_e500_shadow_release(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];
	struct page *page = vcpu_e500->shadow_pages[tlbsel][esel];

	if (page) {
		vcpu_e500->shadow_pages[tlbsel][esel] = NULL;

		if (get_tlb_v(stlbe)) {
			if (tlbe_is_writable(stlbe))
				kvm_release_page_dirty(page);
			else
				kvm_release_page_clean(page);
		}
	}
}

static void kvmppc_e500_stlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);
	stlbe->mas1 = 0;
	trace_kvm_stlb_inval(index_of(tlbsel, esel));
}

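/*
 * Invalidate every shadow TLB1 entry that overlaps [eaddr, eend] and
 * matches the given TID, then write the cleared entry out so the
 * stale translation is also evicted from the host TLB.
 */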
static void kvmppc_e500_tlb1_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, gva_t eend, u32 tid)
{
	unsigned int pid = tid & 0xff;
	unsigned int i;

	/* XXX Replace loop with fancy data structures. */
	for (i = 0; i < tlb1_max_shadow_size(); i++) {
		struct tlbe *stlbe = &vcpu_e500->shadow_tlb[1][i];
		unsigned int tid;

		if (!get_tlb_v(stlbe))
			continue;

		if (eend < get_tlb_eaddr(stlbe))
			continue;

		if (eaddr > get_tlb_end(stlbe))
			continue;

		tid = get_tlb_tid(stlbe);
		if (tid && (tid != pid))
			continue;

		kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);
		write_host_tlbe(vcpu_e500, 1, i);
	}
}

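/*
 * Load the MAS registers with the state a real e500 would present on
 * a guest TLB miss, taking the default TLB select, PID select and
 * page size from MAS4, so the guest's miss handler can complete the
 * reload with a tlbwe.
 */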
static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only the lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}

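/*
 * Map a guest page into the shadow TLB: pin the backing page with
 * gfn_to_page(), drop the reference to whatever the entry held
 * before, and build a host entry that is always TS=1, IPROT=0, 4K,
 * with attributes and permissions derived from the guest entry.
 */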
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int esel)
{
	struct page *new_page;
	struct tlbe *stlbe;
	hpa_t hpaddr;

	stlbe = &vcpu_e500->shadow_tlb[tlbsel][esel];

	/* Get reference to new page. */
	new_page = gfn_to_page(vcpu_e500->vcpu.kvm, gfn);
	if (is_error_page(new_page)) {
		printk(KERN_ERR "Couldn't get guest page for gfn %lx!\n",
				(long)gfn);
		kvm_release_page_clean(new_page);
		return;
	}
	hpaddr = page_to_phys(new_page);

	/* Drop reference to old page. */
	kvmppc_e500_shadow_release(vcpu_e500, tlbsel, esel);

	vcpu_e500->shadow_pages[tlbsel][esel] = new_page;

	/* Force TS=1 IPROT=0 TSIZE=4KB for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(BOOK3E_PAGESZ_4K)
		| MAS1_TID(get_tlb_tid(gtlbe)) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas3 = (hpaddr & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7 = (hpaddr >> 32) & MAS7_RPN;

	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}

/* XXX for now, only map the one-to-one case, using TLB0 */
static int kvmppc_e500_stlbe_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe;

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, tlbsel, esel);

	return esel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-to-one and one-to-many mappings, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe)
{
	unsigned int victim;

	victim = vcpu_e500->guest_tlb_nv[1]++;

	if (unlikely(vcpu_e500->guest_tlb_nv[1] >= tlb1_max_shadow_size()))
		vcpu_e500->guest_tlb_nv[1] = 0;

	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, victim);

	return victim;
}

/* Invalidate all guest kernel mappings when entering usermode,
 * so that when they fault back in they will get the
 * proper permission bits. */
void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode)
{
	if (usermode) {
		struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
		int i;

		/* XXX Replace loop with fancy data structures. */
		for (i = 0; i < tlb1_max_shadow_size(); i++)
			kvmppc_e500_stlbe_invalidate(vcpu_e500, 1, i);

		_tlbil_all();
	}
}

static int kvmppc_e500_gtlbe_invalidate(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	if (tlbsel == 1) {
		kvmppc_e500_tlb1_invalidate(vcpu_e500, get_tlb_eaddr(gtlbe),
				get_tlb_end(gtlbe),
				get_tlb_tid(gtlbe));
	} else {
		kvmppc_e500_stlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	gtlbe->mas1 = 0;

	return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	_tlbil_all();

	return EMULATE_DONE;
}

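/*
 * Emulate tlbivax: bit 2 of the effective address requests
 * invalidate-all and bit 3 selects the TLB; otherwise only the entry
 * matching the EA under the current PID is invalidated.
 */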
int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only the lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->guest_tlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	_tlbil_all();

	return EMULATE_DONE;
}

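/*
 * Emulate tlbre: read the guest TLB entry selected by MAS0 back into
 * MAS1-MAS7 and refresh the NV hint in MAS0.
 */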
int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

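/*
 * Emulate tlbsx: search both guest TLBs for an entry matching the EA
 * in rb under MAS6's SPID/SAS. On a hit, load MAS0-MAS7 from the
 * entry; on a miss, preload the MAS defaults so the guest can follow
 * up with a tlbwe.
 */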
int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* since we only have two TLBs, only the lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? tlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->guest_tlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| ((vcpu_e500->mas6 & MAS6_SAS) ? MAS1_TS : 0)
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

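/*
 * Emulate tlbwe: commit MAS1-MAS7 to the guest TLB entry selected by
 * MAS0, invalidating any shadow mappings the old entry had. If the
 * new entry is host-safe it is shadowed immediately: directly for
 * TLB0, or as an initial 4K mapping for TLB1, with the rest of a
 * large page faulted in on demand.
 */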
int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	u64 eaddr;
	u64 raddr;
	u32 tid;
	struct tlbe *gtlbe;
	int tlbsel, esel, stlbsel, sesel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->guest_tlb[tlbsel][esel];

	if (get_tlb_v(gtlbe) && tlbsel == 1) {
		eaddr = get_tlb_eaddr(gtlbe);
		tid = get_tlb_tid(gtlbe);
		kvmppc_e500_tlb1_invalidate(vcpu_e500, eaddr,
				get_tlb_end(gtlbe), tid);
	}

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_stlbe_map(vcpu_e500, 0, esel);

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe);
			break;

		default:
			BUG();
		}
		write_host_tlbe(vcpu_e500, stlbsel, sesel);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

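/*
 * Translate a guest effective address to a guest physical address
 * via the guest TLB entry located earlier by kvmppc_e500_tlb_search().
 */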
gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->guest_tlb[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, i;

	for (tlbsel = 0; tlbsel < 2; tlbsel++)
		for (i = 0; i < vcpu_e500->shadow_tlb_size[tlbsel]; i++)
			kvmppc_e500_shadow_release(vcpu_e500, tlbsel, i);

	/* Discard all guest mappings. */
	_tlbil_all();
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel;
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		struct tlbe *gtlbe
			= &vcpu_e500->guest_tlb[tlbsel][esel];

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe);
		break;
	}

	default:
		BUG();
		break;
	}
	write_host_tlbe(vcpu_e500, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
		gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

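/*
 * A guest PID0 write updates both the guest-visible PID and the
 * shadow PID used while guest code is running.
 */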
void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	vcpu_e500->pid[0] = vcpu->arch.shadow_pid =
		vcpu->arch.pid = pid;
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->guest_tlb[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->guest_tlb[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

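/*
 * Allocate the guest and shadow TLB arrays, and build the TLB
 * configuration registers shown to the guest: the host TLBnCFG
 * values with the NENTRY field replaced by the guest array sizes.
 */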
int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	tlb1_entry_num = mfspr(SPRN_TLB1CFG) & 0xFFF;

	vcpu_e500->guest_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->guest_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[0] == NULL)
		goto err_out;

	vcpu_e500->shadow_tlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->shadow_tlb[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[0] == NULL)
		goto err_out_guest0;

	vcpu_e500->guest_tlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->guest_tlb[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->guest_tlb[1] == NULL)
		goto err_out_shadow0;

	vcpu_e500->shadow_tlb_size[1] = tlb1_entry_num;
	vcpu_e500->shadow_tlb[1] =
		kzalloc(sizeof(struct tlbe) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_tlb[1] == NULL)
		goto err_out_guest1;

	vcpu_e500->shadow_pages[0] = (struct page **)
		kzalloc(sizeof(struct page *) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[0] == NULL)
		goto err_out_shadow1;

	vcpu_e500->shadow_pages[1] = (struct page **)
		kzalloc(sizeof(struct page *) * tlb1_entry_num, GFP_KERNEL);
	if (vcpu_e500->shadow_pages[1] == NULL)
		goto err_out_page0;

	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->guest_tlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->guest_tlb_size[1];

	return 0;

err_out_page0:
	kfree(vcpu_e500->shadow_pages[0]);
err_out_shadow1:
	kfree(vcpu_e500->shadow_tlb[1]);
err_out_guest1:
	kfree(vcpu_e500->guest_tlb[1]);
err_out_shadow0:
	kfree(vcpu_e500->shadow_tlb[0]);
err_out_guest0:
	kfree(vcpu_e500->guest_tlb[0]);
err_out:
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->shadow_pages[1]);
	kfree(vcpu_e500->shadow_pages[0]);
	kfree(vcpu_e500->shadow_tlb[1]);
	kfree(vcpu_e500->guest_tlb[1]);
	kfree(vcpu_e500->shadow_tlb[0]);
	kfree(vcpu_e500->guest_tlb[0]);
}