/*
 * Copyright (C) 2008-2011 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_e500.h>

#include "../mm/mmu_decl.h"
#include "e500_tlb.h"
#include "trace.h"
#include "timing.h"

#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

struct id {
	unsigned long val;
	struct id **pentry;
};

#define NUM_TIDS 256

/*
 * This table provides mappings from:
 * (guestAS, guestTID, guestPR) --> ID of physical cpu
 * guestAS  [0..1]
 * guestTID [0..255]
 * guestPR  [0..1]
 * ID       [1..255]
 * Each vcpu keeps one vcpu_id_table.
 */
struct vcpu_id_table {
	struct id id[2][NUM_TIDS][2];
};

/*
 * This table provides the reverse mapping of vcpu_id_table:
 * ID --> address of vcpu_id_table item.
 * Each physical core has one pcpu_id_table.
 */
struct pcpu_id_table {
	struct id *entry[NUM_TIDS];
};

static DEFINE_PER_CPU(struct pcpu_id_table, pcpu_sids);

/* This variable keeps the last used shadow ID on the local core.
 * The valid range of a shadow ID is [1..255]. */
static DEFINE_PER_CPU(unsigned long, pcpu_last_used_sid);
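
/*
 * Illustrative note (a sketch, not driver code): a shadow ID mapping is
 * live only while the forward entry in vcpu_id_table and the per-cpu
 * reverse entry in pcpu_sids point at each other, i.e., with preemption
 * disabled:
 *
 *	struct id *entry = &idt->id[as][tid][pr];
 *	bool live = entry->val != 0 &&
 *		    __get_cpu_var(pcpu_sids).entry[entry->val] == entry;
 *
 * This lets either side revoke a mapping unilaterally: zeroing the
 * vcpu-side entry kills it for that vcpu only, while wiping pcpu_sids
 * (plus a TLB flush) kills every vcpu's mappings on the core at once.
 */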

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

/*
 * Allocate a free shadow ID and set up a valid SID mapping in the given
 * entry. A mapping is valid only when the vcpu_id_table and
 * pcpu_id_table entries match.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_setup_one(struct id *entry)
{
	unsigned long sid;
	int ret = -1;

	sid = ++(__get_cpu_var(pcpu_last_used_sid));
	if (sid < NUM_TIDS) {
		__get_cpu_var(pcpu_sids).entry[sid] = entry;
		entry->val = sid;
		entry->pentry = &__get_cpu_var(pcpu_sids).entry[sid];
		ret = sid;
	}

	/*
	 * If sid == NUM_TIDS, we've run out of sids.  We return -1, and
	 * the caller will invalidate everything and start over.
	 *
	 * sid > NUM_TIDS indicates a race, which we disable preemption to
	 * avoid.
	 */
	WARN_ON(sid > NUM_TIDS);

	return ret;
}

/*
 * Check whether the given entry contains a valid shadow ID mapping.
 * An ID mapping is considered valid only if
 * both the vcpu and the pcpu know this mapping.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static inline int local_sid_lookup(struct id *entry)
{
	if (entry && entry->val != 0 &&
	    __get_cpu_var(pcpu_sids).entry[entry->val] == entry &&
	    entry->pentry == &__get_cpu_var(pcpu_sids).entry[entry->val])
		return entry->val;
	return -1;
}
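
/*
 * Typical caller pattern (a sketch; kvmppc_e500_get_sid() below is the
 * real user): the result is only trustworthy while preemption stays
 * disabled, since migrating to another core changes which pcpu_sids
 * instance the entry has to match.
 *
 *	preempt_disable();
 *	sid = local_sid_lookup(&idt->id[as][gid][pr]);
 *	if (sid > 0)
 *		stlbe.mas1 |= MAS1_TID(sid);	/- e.g. tag a shadow entry
 *	preempt_enable();
 */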

/* Invalidate all id mappings on local core -- call with preempt disabled */
static inline void local_sid_destroy_all(void)
{
	__get_cpu_var(pcpu_last_used_sid) = 0;
	memset(&__get_cpu_var(pcpu_sids), 0, sizeof(__get_cpu_var(pcpu_sids)));
}

static void *kvmppc_e500_id_table_alloc(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	vcpu_e500->idt = kzalloc(sizeof(struct vcpu_id_table), GFP_KERNEL);
	return vcpu_e500->idt;
}

static void kvmppc_e500_id_table_free(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->idt);
}

/* Invalidate all mappings on vcpu */
static void kvmppc_e500_id_table_reset_all(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	memset(vcpu_e500->idt, 0, sizeof(struct vcpu_id_table));

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/* Invalidate one ID mapping on vcpu */
static inline void kvmppc_e500_id_table_reset_one(
	struct kvmppc_vcpu_e500 *vcpu_e500,
	int as, int pid, int pr)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;

	BUG_ON(as >= 2);
	BUG_ON(pid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	idt->id[as][pid][pr].val = 0;
	idt->id[as][pid][pr].pentry = NULL;

	/* Update shadow pid when mappings are changed */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

/*
 * Map guest (vcpu, AS, ID, PR) to a physical core shadow ID.
 * This function first looks up whether a valid mapping exists;
 * if not, it creates a new one.
 *
 * The caller must have preemption disabled, and keep it that way until
 * it has finished with the returned shadow id (either written into the
 * TLB or arch.shadow_pid, or discarded).
 */
static unsigned int kvmppc_e500_get_sid(struct kvmppc_vcpu_e500 *vcpu_e500,
					unsigned int as, unsigned int gid,
					unsigned int pr, int avoid_recursion)
{
	struct vcpu_id_table *idt = vcpu_e500->idt;
	int sid;

	BUG_ON(as >= 2);
	BUG_ON(gid >= NUM_TIDS);
	BUG_ON(pr >= 2);

	sid = local_sid_lookup(&idt->id[as][gid][pr]);

	while (sid <= 0) {
		/* No mapping yet */
		sid = local_sid_setup_one(&idt->id[as][gid][pr]);
		if (sid <= 0) {
			_tlbil_all();
			local_sid_destroy_all();
		}

		/* Update shadow pid when mappings are changed */
		if (!avoid_recursion)
			kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}

	return sid;
}
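
/*
 * A worked example of the exhaustion path above (a sketch, not driver
 * code): once 255 shadow IDs have been handed out on a core,
 * local_sid_setup_one() bumps pcpu_last_used_sid to 256 (== NUM_TIDS)
 * and returns -1.  kvmppc_e500_get_sid() then flushes the entire local
 * TLB with _tlbil_all(), zeroes the per-cpu table with
 * local_sid_destroy_all() (which invalidates every vcpu's mappings on
 * this core at once), and retries; the retry allocates sid 1 from the
 * now-empty space.
 */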

/* Map guest PID to shadow.
 * We use PID to keep the shadow of the current guest non-zero PID,
 * and use PID1 to keep the shadow of the guest zero PID,
 * so that a guest TLB entry with TID=0 can be matched at any time. */
void kvmppc_e500_recalc_shadow_pid(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	preempt_disable();
	vcpu_e500->vcpu.arch.shadow_pid = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu),
			get_cur_pid(&vcpu_e500->vcpu),
			get_cur_pr(&vcpu_e500->vcpu), 1);
	vcpu_e500->vcpu.arch.shadow_pid1 = kvmppc_e500_get_sid(vcpu_e500,
			get_cur_as(&vcpu_e500->vcpu), 0,
			get_cur_pr(&vcpu_e500->vcpu), 1);
	preempt_enable();
}
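
/*
 * Example (a sketch of the PID/PID1 scheme, with made-up numbers): if
 * the guest runs with AS=0, PR=0 and PID=5, then shadow_pid holds the
 * sid for (0, 5, 0) and shadow_pid1 holds the sid for (0, 0, 0).  On a
 * translation the hardware compares the entry TID against both PID and
 * PID1, so shadow entries built from guest TID=0 (global) mappings
 * still hit while a non-zero guest PID is active.
 */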

void kvmppc_dump_tlbs(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *tlbe;
	int i, tlbsel;

	printk("| %8s | %8s | %8s | %8s | %8s |\n",
			"nr", "mas1", "mas2", "mas3", "mas7");

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		printk("Guest TLB%d:\n", tlbsel);
		for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
			tlbe = &vcpu_e500->gtlb_arch[tlbsel][i];
			if (tlbe->mas1 & MAS1_VALID)
				printk(" G[%d][%3d] | %08X | %08X | %08X | %08X |\n",
					tlbsel, i, tlbe->mas1, tlbe->mas2,
					tlbe->mas3, tlbe->mas7);
		}
	}
}

static inline unsigned int gtlb0_get_next_victim(
		struct kvmppc_vcpu_e500 *vcpu_e500)
{
	unsigned int victim;

	victim = vcpu_e500->gtlb_nv[0]++;
	if (unlikely(vcpu_e500->gtlb_nv[0] >= KVM_E500_TLB0_WAY_NUM))
		vcpu_e500->gtlb_nv[0] = 0;

	return victim;
}

static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for the magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

static inline int tlbe_is_writable(struct tlbe *tlbe)
{
	return tlbe->mas3 & (MAS3_SW|MAS3_UW);
}

static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

	if (!usermode) {
		/* Guest is in supervisor mode,
		 * so we need to translate guest
		 * supervisor permissions into user permissions. */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}

	return mas3 | E500_TLB_SUPER_PERM_MASK;
}
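
/*
 * Worked example (assuming the usual MAS3 bit layout, where each user
 * permission bit sits one position above its supervisor counterpart,
 * and E500_TLB_SUPER_PERM_MASK covers SR|SW|SX): a guest
 * supervisor-only mapping with mas3 = MAS3_SR | MAS3_SW enters with
 * usermode == 0, because the guest kernel actually runs in host user
 * mode.  The shift turns SR|SW into UR|UW, and the final OR grants the
 * host kernel full supervisor access, so the result is
 * SR | SW | SX | UR | UW.
 */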

static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct tlbe *stlbe, uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, stlbe->mas2);
	mtspr(SPRN_MAS3, stlbe->mas3);
	mtspr(SPRN_MAS7, stlbe->mas7);
	asm volatile("isync; tlbwe" : : : "memory");
	local_irq_restore(flags);
}

/* esel is an index into the set, not the whole array */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int esel, struct tlbe *stlbe)
{
	if (tlbsel == 0) {
		__write_host_tlbe(stlbe, MAS0_TLBSEL(0) | MAS0_ESEL(esel));
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(esel)));
	}
	trace_kvm_stlb_write(index_of(tlbsel, esel), stlbe->mas1, stlbe->mas2,
			     stlbe->mas3, stlbe->mas7);
}

void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas3 = (pfn << PAGE_SHIFT) |
		     MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas7 = pfn >> (32 - PAGE_SHIFT);

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}

void kvmppc_e500_tlb_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* The shadow PID may have expired on the local core */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

void kvmppc_e500_tlb_put(struct kvm_vcpu *vcpu)
{
}

static void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500,
				int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	struct vcpu_id_table *idt = vcpu_e500->idt;
	unsigned int pr, tid, ts, pid;
	u32 val, eaddr;
	unsigned long flags;

	ts = get_tlb_ts(gtlbe);
	tid = get_tlb_tid(gtlbe);

	preempt_disable();

	/* One guest ID may be mapped to two shadow IDs */
	for (pr = 0; pr < 2; pr++) {
		/*
		 * The shadow PID can have a valid mapping on at most one
		 * host CPU. In the common case, it will be valid on this
		 * CPU, in which case (for TLB0) we do a local invalidation
		 * of the specific address.
		 *
		 * If the shadow PID is not valid on the current host CPU,
		 * or if we're invalidating a TLB1 entry, we invalidate the
		 * entire shadow PID.
		 */
		if (tlbsel == 1 ||
		    (pid = local_sid_lookup(&idt->id[ts][tid][pr])) <= 0) {
			kvmppc_e500_id_table_reset_one(vcpu_e500, ts, tid, pr);
			continue;
		}

		/*
		 * The guest is invalidating a TLB0 entry which is in a PID
		 * that has a valid shadow mapping on this host CPU. We
		 * search host TLB0 to invalidate its shadow TLB entry,
		 * similar to __tlbil_va except that we need to look in AS1.
		 */
		val = (pid << MAS6_SPID_SHIFT) | MAS6_SAS;
		eaddr = get_tlb_eaddr(gtlbe);

		local_irq_save(flags);

		mtspr(SPRN_MAS6, val);
		asm volatile("tlbsx 0, %[eaddr]" : : [eaddr] "r" (eaddr));
		val = mfspr(SPRN_MAS1);
		if (val & MAS1_VALID) {
			mtspr(SPRN_MAS1, val & ~MAS1_VALID);
			asm volatile("tlbwe");
		}

		local_irq_restore(flags);
	}

	preempt_enable();
}

static int tlb0_set_base(gva_t addr, int sets, int ways)
{
	int set_base;

	set_base = (addr >> PAGE_SHIFT) & (sets - 1);
	set_base *= ways;

	return set_base;
}
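
/*
 * Worked example (made-up numbers): with a 128-set, 4-way TLB0, the
 * entries of one set are stored contiguously, so for
 * addr = 0x10003000:
 *
 *	(0x10003000 >> 12) & (128 - 1) = 3	(set index)
 *	3 * 4 = 12				(first entry of set 3)
 *
 * i.e. set 3 occupies flat indices 12..15 of the array.
 */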

static int gtlb0_set_base(struct kvmppc_vcpu_e500 *vcpu_e500, gva_t addr)
{
	int sets = KVM_E500_TLB0_SIZE / KVM_E500_TLB0_WAY_NUM;

	return tlb0_set_base(addr, sets, KVM_E500_TLB0_WAY_NUM);
}

static int htlb0_set_base(gva_t addr)
{
	return tlb0_set_base(addr, host_tlb_params[0].sets,
			     host_tlb_params[0].ways);
}

static unsigned int get_tlb_esel(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel)
{
	unsigned int esel = get_tlb_esel_bit(vcpu_e500);

	if (tlbsel == 0) {
		esel &= KVM_E500_TLB0_WAY_NUM_MASK;
		esel += gtlb0_set_base(vcpu_e500, vcpu_e500->mas2);
	} else {
		esel &= vcpu_e500->gtlb_size[tlbsel] - 1;
	}

	return esel;
}

/* Search the guest TLB for a matching entry. */
static int kvmppc_e500_tlb_index(struct kvmppc_vcpu_e500 *vcpu_e500,
		gva_t eaddr, int tlbsel, unsigned int pid, int as)
{
	int size = vcpu_e500->gtlb_size[tlbsel];
	unsigned int set_base;
	int i;

	if (tlbsel == 0) {
		set_base = gtlb0_set_base(vcpu_e500, eaddr);
		size = KVM_E500_TLB0_WAY_NUM;
	} else {
		set_base = 0;
	}

	for (i = 0; i < size; i++) {
		struct tlbe *tlbe = &vcpu_e500->gtlb_arch[tlbsel][set_base + i];
		unsigned int tid;

		if (eaddr < get_tlb_eaddr(tlbe))
			continue;

		if (eaddr > get_tlb_end(tlbe))
			continue;

		tid = get_tlb_tid(tlbe);
		if (tid && (tid != pid))
			continue;

		if (!get_tlb_v(tlbe))
			continue;

		if (get_tlb_ts(tlbe) != as && as != -1)
			continue;

		return set_base + i;
	}

	return -1;
}
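
/*
 * Note on the search above: TLB1 is treated as fully associative (all
 * gtlb_size[1] entries are scanned from a set_base of 0), while for
 * TLB0 only the KVM_E500_TLB0_WAY_NUM entries of the set selected by
 * eaddr are candidates -- e.g. with 4 ways, a lookup touches just 4
 * entries regardless of the table size.  TID 0 matches any PID, and
 * as == -1 means "either address space".
 */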

static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct tlbe *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		ref->flags |= E500_TLB_DIRTY;
}

static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		if (ref->flags & E500_TLB_DIRTY)
			kvm_release_pfn_dirty(ref->pfn);
		else
			kvm_release_pfn_clean(ref->pfn);

		ref->flags = 0;
	}
}

static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel = 0;
	int i;

	for (i = 0; i < vcpu_e500->gtlb_size[tlbsel]; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
		kvmppc_e500_ref_release(ref);
	}
}

static void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel = 1;
	int i;

	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->tlb_refs[stlbsel][i];
		kvmppc_e500_ref_release(ref);
	}

	clear_tlb_privs(vcpu_e500);
}

static inline void kvmppc_e500_deliver_tlb_miss(struct kvm_vcpu *vcpu,
		unsigned int eaddr, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int victim, pidsel, tsized;
	int tlbsel;

	/* since we only have two TLBs, only the lower bit is used. */
	tlbsel = (vcpu_e500->mas4 >> 28) & 0x1;
	victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;
	pidsel = (vcpu_e500->mas4 >> 16) & 0xf;
	tsized = (vcpu_e500->mas4 >> 7) & 0x1f;

	vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
		| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = MAS1_VALID | (as ? MAS1_TS : 0)
		| MAS1_TID(vcpu_e500->pid[pidsel])
		| MAS1_TSIZE(tsized);
	vcpu_e500->mas2 = (eaddr & MAS2_EPN)
		| (vcpu_e500->mas4 & MAS2_ATTRIB_MASK);
	vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
	vcpu_e500->mas6 = (vcpu_e500->mas6 & MAS6_SPID1)
		| (get_cur_pid(vcpu) << 16)
		| (as ? MAS6_SAS : 0);
	vcpu_e500->mas7 = 0;
}

/* TID must be supplied by the caller */
static inline void kvmppc_e500_setup_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
					   struct tlbe *gtlbe, int tsize,
					   struct tlbe_ref *ref,
					   u64 gvaddr, struct tlbe *stlbe)
{
	pfn_t pfn = ref->pfn;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force TS=1 IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | MAS1_TS | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN)
		| e500_shadow_mas2_attrib(gtlbe->mas2,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas3 = ((pfn << PAGE_SHIFT) & MAS3_RPN)
		| e500_shadow_mas3_attrib(gtlbe->mas3,
				vcpu_e500->vcpu.arch.shared->msr & MSR_PR);
	stlbe->mas7 = (pfn >> (32 - PAGE_SHIFT)) & MAS7_RPN;
}

/* sesel is an index into the entire array, not just the set */
static inline void kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, int tlbsel, int sesel,
	struct tlbe *stlbe, struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn, hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */
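			/*
			 * For example (made-up numbers): if the guest asked
			 * for a 16M mapping (tsize 14) of gfn 0x10000 at
			 * pfn 0x20100, gfn and pfn are congruent only
			 * modulo 256 pages, so the loop below walks
			 * 14 -> 12 -> 10 and settles on tsize 10 (1M,
			 * tsize_pages = 256), provided that 256-page window
			 * also fits inside the vma/memslot overlap.
			 */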

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		pfn = gfn_to_pfn_memslot(vcpu_e500->vcpu.kvm, slot, gfn);
		if (is_error_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
					(long)gfn);
			kvm_release_pfn_clean(pfn);
			return;
		}
	}

	/* Drop old ref and setup new one. */
	kvmppc_e500_ref_release(ref);
	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, tsize, ref, gvaddr, stlbe);
}

/* XXX only map the one-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500,
				int esel, struct tlbe *stlbe)
{
	struct tlbe *gtlbe;
	struct tlbe_ref *ref;
	int sesel = esel & (host_tlb_params[0].ways - 1);
	int sesel_base;
	gva_t ea;

	gtlbe = &vcpu_e500->gtlb_arch[0][esel];
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	ea = get_tlb_eaddr(gtlbe);
	sesel_base = htlb0_set_base(ea);

	kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
			get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
			gtlbe, 0, sesel_base + sesel, stlbe, ref);

	return sesel;
}

/* Caller must ensure that the specified guest TLB entry is safe to insert into
 * the shadow TLB. */
/* XXX for both one-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct tlbe *gtlbe, struct tlbe *stlbe)
{
	struct tlbe_ref *ref;
	unsigned int victim;

	victim = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	ref = &vcpu_e500->tlb_refs[1][victim];
	kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1,
			       victim, stlbe, ref);

	return victim;
}

void kvmppc_mmu_msr_notify(struct kvm_vcpu *vcpu, u32 old_msr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	/* Recalc shadow pid since MSR changes */
	kvmppc_e500_recalc_shadow_pid(vcpu_e500);
}

static inline int kvmppc_e500_gtlbe_invalidate(
	struct kvmppc_vcpu_e500 *vcpu_e500,
	int tlbsel, int esel)
{
	struct tlbe *gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	if (unlikely(get_tlb_iprot(gtlbe)))
		return -1;

	gtlbe->mas1 = 0;

	return 0;
}

int kvmppc_e500_emul_mt_mmucsr0(struct kvmppc_vcpu_e500 *vcpu_e500, ulong value)
{
	int esel;

	if (value & MMUCSR0_TLB0FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[0]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 0, esel);
	if (value & MMUCSR0_TLB1FI)
		for (esel = 0; esel < vcpu_e500->gtlb_size[1]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, 1, esel);

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbivax(struct kvm_vcpu *vcpu, int ra, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	unsigned int ia;
	int esel, tlbsel;
	gva_t ea;

	ea = ((ra) ? kvmppc_get_gpr(vcpu, ra) : 0) + kvmppc_get_gpr(vcpu, rb);

	ia = (ea >> 2) & 0x1;

	/* since we only have two TLBs, only the lower bit is used. */
	tlbsel = (ea >> 3) & 0x1;

	if (ia) {
		/* invalidate all entries */
		for (esel = 0; esel < vcpu_e500->gtlb_size[tlbsel]; esel++)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	} else {
		ea &= 0xfffff000;
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel,
				get_cur_pid(vcpu), -1);
		if (esel >= 0)
			kvmppc_e500_gtlbe_invalidate(vcpu_e500, tlbsel, esel);
	}

	/* Invalidate all vcpu id mappings */
	kvmppc_e500_id_table_reset_all(vcpu_e500);

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbre(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int tlbsel, esel;
	struct tlbe *gtlbe;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
	vcpu_e500->mas0 &= ~MAS0_NV(~0);
	vcpu_e500->mas0 |= MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
	vcpu_e500->mas1 = gtlbe->mas1;
	vcpu_e500->mas2 = gtlbe->mas2;
	vcpu_e500->mas3 = gtlbe->mas3;
	vcpu_e500->mas7 = gtlbe->mas7;

	return EMULATE_DONE;
}

int kvmppc_e500_emul_tlbsx(struct kvm_vcpu *vcpu, int rb)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int as = !!get_cur_sas(vcpu_e500);
	unsigned int pid = get_cur_spid(vcpu_e500);
	int esel, tlbsel;
	struct tlbe *gtlbe = NULL;
	gva_t ea;

	ea = kvmppc_get_gpr(vcpu, rb);

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, ea, tlbsel, pid, as);
		if (esel >= 0) {
			gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];
			break;
		}
	}

	if (gtlbe) {
		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(esel)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = gtlbe->mas1;
		vcpu_e500->mas2 = gtlbe->mas2;
		vcpu_e500->mas3 = gtlbe->mas3;
		vcpu_e500->mas7 = gtlbe->mas7;
	} else {
		int victim;

		/* since we only have two TLBs, only the lower bit is used. */
		tlbsel = vcpu_e500->mas4 >> 28 & 0x1;
		victim = (tlbsel == 0) ? gtlb0_get_next_victim(vcpu_e500) : 0;

		vcpu_e500->mas0 = MAS0_TLBSEL(tlbsel) | MAS0_ESEL(victim)
			| MAS0_NV(vcpu_e500->gtlb_nv[tlbsel]);
		vcpu_e500->mas1 = (vcpu_e500->mas6 & MAS6_SPID0)
			| (vcpu_e500->mas6 & (MAS6_SAS ? MAS1_TS : 0))
			| (vcpu_e500->mas4 & MAS4_TSIZED(~0));
		vcpu_e500->mas2 &= MAS2_EPN;
		vcpu_e500->mas2 |= vcpu_e500->mas4 & MAS2_ATTRIB_MASK;
		vcpu_e500->mas3 &= MAS3_U0 | MAS3_U1 | MAS3_U2 | MAS3_U3;
		vcpu_e500->mas7 = 0;
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBSX_EXITS);
	return EMULATE_DONE;
}

/* sesel is an index into the set, not the whole array */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct tlbe *gtlbe,
			struct tlbe *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, get_tlb_ts(gtlbe),
				   get_tlb_tid(gtlbe),
				   get_cur_pr(&vcpu_e500->vcpu), 0);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

int kvmppc_e500_emul_tlbwe(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe;
	int tlbsel, esel;

	tlbsel = get_tlb_tlbsel(vcpu_e500);
	esel = get_tlb_esel(vcpu_e500, tlbsel);

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	if (get_tlb_v(gtlbe))
		inval_gtlbe_on_host(vcpu_e500, tlbsel, esel);

	gtlbe->mas1 = vcpu_e500->mas1;
	gtlbe->mas2 = vcpu_e500->mas2;
	gtlbe->mas3 = vcpu_e500->mas3;
	gtlbe->mas7 = vcpu_e500->mas7;

	trace_kvm_gtlb_write(vcpu_e500->mas0, gtlbe->mas1, gtlbe->mas2,
			     gtlbe->mas3, gtlbe->mas7);

	/* Invalidate shadow mappings for the about-to-be-clobbered TLBE. */
	if (tlbe_is_host_safe(vcpu, gtlbe)) {
		struct tlbe stlbe;
		int stlbsel, sesel;
		u64 eaddr;
		u64 raddr;

		switch (tlbsel) {
		case 0:
			/* TLB0 */
			gtlbe->mas1 &= ~MAS1_TSIZE(~0);
			gtlbe->mas1 |= MAS1_TSIZE(BOOK3E_PAGESZ_4K);

			stlbsel = 0;
			sesel = kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);

			break;

		case 1:
			/* TLB1 */
			eaddr = get_tlb_eaddr(gtlbe);
			raddr = get_tlb_raddr(gtlbe);

			/* Create a 4KB mapping on the host.
			 * If the guest wanted a large page,
			 * only the first 4KB is mapped here and the rest
			 * are mapped on the fly. */
			stlbsel = 1;
			sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr,
					raddr >> PAGE_SHIFT, gtlbe, &stlbe);
			break;

		default:
			BUG();
		}

		write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_TLBWE_EXITS);
	return EMULATE_DONE;
}

int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	return kvmppc_e500_tlb_search(vcpu, eaddr, get_cur_pid(vcpu), as);
}

void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_IS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.pc, as);
}

void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu)
{
	unsigned int as = !!(vcpu->arch.shared->msr & MSR_DS);

	kvmppc_e500_deliver_tlb_miss(vcpu, vcpu->arch.fault_dear, as);
}

gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int index,
			gva_t eaddr)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe *gtlbe =
		&vcpu_e500->gtlb_arch[tlbsel_of(index)][esel_of(index)];
	u64 pgmask = get_tlb_bytes(gtlbe) - 1;

	return get_tlb_raddr(gtlbe) | (eaddr & pgmask);
}
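
/*
 * Worked example for the translation above (made-up numbers): for a
 * 256M entry, get_tlb_bytes() is 0x10000000, so pgmask = 0x0fffffff.
 * With raddr = 0x40000000 and eaddr = 0x23456789, the guest physical
 * address is 0x40000000 | (0x23456789 & 0x0fffffff) = 0x43456789.
 */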

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
}

void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
			unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct tlbe *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);
	int stlbsel, sesel;

	gtlbe = &vcpu_e500->gtlb_arch[tlbsel][esel];

	switch (tlbsel) {
	case 0:
		stlbsel = 0;
		sesel = esel & (host_tlb_params[0].ways - 1);
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		kvmppc_e500_setup_stlbe(vcpu_e500, gtlbe, BOOK3E_PAGESZ_4K,
					&priv->ref, eaddr, &stlbe);
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;

		stlbsel = 1;
		sesel = kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn,
					     gtlbe, &stlbe);
		break;
	}

	default:
		BUG();
		break;
	}

	write_stlbe(vcpu_e500, gtlbe, &stlbe, stlbsel, sesel);
}

int kvmppc_e500_tlb_search(struct kvm_vcpu *vcpu,
		gva_t eaddr, unsigned int pid, int as)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int esel, tlbsel;

	for (tlbsel = 0; tlbsel < 2; tlbsel++) {
		esel = kvmppc_e500_tlb_index(vcpu_e500, eaddr, tlbsel, pid, as);
		if (esel >= 0)
			return index_of(tlbsel, esel);
	}

	return -1;
}

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);

	if (vcpu->arch.pid != pid) {
		vcpu_e500->pid[0] = vcpu->arch.pid = pid;
		kvmppc_e500_recalc_shadow_pid(vcpu_e500);
	}
}

void kvmppc_e500_tlb_setup(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	struct tlbe *tlbe;

	/* Insert large initial mapping for guest. */
	tlbe = &vcpu_e500->gtlb_arch[1][0];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_256M);
	tlbe->mas2 = 0;
	tlbe->mas3 = E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;

	/* 4K map for serial output. Used by kernel wrapper. */
	tlbe = &vcpu_e500->gtlb_arch[1][1];
	tlbe->mas1 = MAS1_VALID | MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	tlbe->mas2 = (0xe0004500 & 0xFFFFF000) | MAS2_I | MAS2_G;
	tlbe->mas3 = (0xe0004500 & 0xFFFFF000) | E500_TLB_SUPER_PERM_MASK;
	tlbe->mas7 = 0;
}

int kvmppc_e500_tlb_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;
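
	/*
	 * Example (typical e500v2 numbers, for illustration only): a
	 * 512-entry, 4-way TLB0 yields sets = 512 / 4 = 128, while the
	 * 16-entry TLB1 is fully associative, so ways = entries = 16
	 * and sets = 1.
	 */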

	vcpu_e500->gtlb_size[0] = KVM_E500_TLB0_SIZE;
	vcpu_e500->gtlb_arch[0] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB0_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[0] == NULL)
		goto err;

	vcpu_e500->gtlb_size[1] = KVM_E500_TLB1_SIZE;
	vcpu_e500->gtlb_arch[1] =
		kzalloc(sizeof(struct tlbe) * KVM_E500_TLB1_SIZE, GFP_KERNEL);
	if (vcpu_e500->gtlb_arch[1] == NULL)
		goto err;

	vcpu_e500->tlb_refs[0] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[0])
		goto err;

	vcpu_e500->tlb_refs[1] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[1])
		goto err;

	vcpu_e500->gtlb_priv[0] =
		kzalloc(sizeof(struct tlbe_ref) * vcpu_e500->gtlb_size[0],
			GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[0])
		goto err;

	vcpu_e500->gtlb_priv[1] =
		kzalloc(sizeof(struct tlbe_ref) * vcpu_e500->gtlb_size[1],
			GFP_KERNEL);
	if (!vcpu_e500->gtlb_priv[1])
		goto err;

	if (kvmppc_e500_id_table_alloc(vcpu_e500) == NULL)
		goto err;

	/* Init TLB configuration register */
	vcpu_e500->tlb0cfg = mfspr(SPRN_TLB0CFG) & ~0xfffUL;
	vcpu_e500->tlb0cfg |= vcpu_e500->gtlb_size[0];
	vcpu_e500->tlb1cfg = mfspr(SPRN_TLB1CFG) & ~0xfffUL;
	vcpu_e500->tlb1cfg |= vcpu_e500->gtlb_size[1];

	return 0;

err:
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);
	kfree(vcpu_e500->gtlb_arch[0]);
	kfree(vcpu_e500->gtlb_arch[1]);
	return -1;
}

void kvmppc_e500_tlb_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	clear_tlb_refs(vcpu_e500);

	kvmppc_e500_id_table_free(vcpu_e500);

	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
	kfree(vcpu_e500->gtlb_priv[0]);
	kfree(vcpu_e500->gtlb_priv[1]);
	kfree(vcpu_e500->gtlb_arch[1]);
	kfree(vcpu_e500->gtlb_arch[0]);
}