/*
 * Copyright (C) 2008-2013 Freescale Semiconductor, Inc. All rights reserved.
 *
 * Author: Yu Liu, yu.liu@freescale.com
 *         Scott Wood, scottwood@freescale.com
 *         Ashish Kalra, ashish.kalra@freescale.com
 *         Varun Sethi, varun.sethi@freescale.com
 *         Alexander Graf, agraf@suse.de
 *
 * Description:
 * This file is based on arch/powerpc/kvm/44x_tlb.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/slab.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/log2.h>
#include <linux/uaccess.h>
#include <linux/sched.h>
#include <linux/rwsem.h>
#include <linux/vmalloc.h>
#include <linux/hugetlb.h>
#include <asm/kvm_ppc.h>

#include "e500.h"
#include "trace.h"
#include "timing.h"
#include "e500_mmu_host.h"

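/*
 * Shadow TLB1 entries are allocated from the top of the host TLB1 down,
 * so shadow entry 'esel' lives at host entry (entries - esel - 1).
 */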
#define to_htlb1_esel(esel) (host_tlb_params[1].entries - (esel) - 1)

static struct kvmppc_e500_tlb_params host_tlb_params[E500_TLB_NUM];

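/*
 * Number of host TLB1 entries available for shadowing guest mappings:
 * the first tlbcam_index entries are in use by the host's own CAM
 * mappings, and one further entry is reserved for the magic page.
 */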
static inline unsigned int tlb1_max_shadow_size(void)
{
	/* reserve one entry for magic page */
	return host_tlb_params[1].entries - tlbcam_index - 1;
}

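/*
 * Derive the shadow MAS3 permission bits from the guest's MAS3.  Without
 * hypervisor support the guest runs entirely in host user mode, so guest
 * supervisor permissions are mirrored into the user permission bits, and
 * supervisor access is always granted to the host.
 */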
static inline u32 e500_shadow_mas3_attrib(u32 mas3, int usermode)
{
	/* Mask off reserved bits. */
	mas3 &= MAS3_ATTRIB_MASK;

#ifndef CONFIG_KVM_BOOKE_HV
	if (!usermode) {
		/*
		 * Guest is in supervisor mode, so we need to translate guest
		 * supervisor permissions into user permissions.
		 */
		mas3 &= ~E500_TLB_USER_PERM_MASK;
		mas3 |= (mas3 & E500_TLB_SUPER_PERM_MASK) << 1;
	}
	mas3 |= E500_TLB_SUPER_PERM_MASK;
#endif
	return mas3;
}

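/*
 * Derive the shadow MAS2 attribute bits from the guest's MAS2.  On SMP
 * hosts the M (memory coherence) bit is forced on, so the mapping stays
 * coherent if the vcpu migrates to another core.
 */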
static inline u32 e500_shadow_mas2_attrib(u32 mas2, int usermode)
{
#ifdef CONFIG_SMP
	return (mas2 & MAS2_ATTRIB_MASK) | MAS2_M;
#else
	return mas2 & MAS2_ATTRIB_MASK;
#endif
}

/*
 * Write a shadow TLB entry into the host TLB.
 */
static inline void __write_host_tlbe(struct kvm_book3e_206_tlb_entry *stlbe,
				     uint32_t mas0)
{
	unsigned long flags;

	local_irq_save(flags);
	mtspr(SPRN_MAS0, mas0);
	mtspr(SPRN_MAS1, stlbe->mas1);
	mtspr(SPRN_MAS2, (unsigned long)stlbe->mas2);
	mtspr(SPRN_MAS3, (u32)stlbe->mas7_3);
	mtspr(SPRN_MAS7, (u32)(stlbe->mas7_3 >> 32));
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_MAS8, stlbe->mas8);
#endif
	asm volatile("isync; tlbwe" : : : "memory");

#ifdef CONFIG_KVM_BOOKE_HV
	/* Must clear mas8 for other host tlbwe's */
	mtspr(SPRN_MAS8, 0);
	isync();
#endif
	local_irq_restore(flags);

	trace_kvm_booke206_stlb_write(mas0, stlbe->mas8, stlbe->mas1,
				      stlbe->mas2, stlbe->mas7_3);
}

/*
 * Acquire a mas0 with victim hint, as if we just took a TLB miss.
 *
 * We don't care about the address we're searching for, other than that it's
 * in the right set and is not present in the TLB.  Using a zero PID and a
 * userspace address means we don't have to set and then restore MAS5, or
 * calculate a proper MAS6 value.
 */
static u32 get_host_mas0(unsigned long eaddr)
{
	unsigned long flags;
	u32 mas0;

	local_irq_save(flags);
	mtspr(SPRN_MAS6, 0);
	asm volatile("tlbsx 0, %0" : : "b" (eaddr & ~CONFIG_PAGE_OFFSET));
	mas0 = mfspr(SPRN_MAS0);
	local_irq_restore(flags);

	return mas0;
}

/* sesel is for tlb1 only */
static inline void write_host_tlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
		int tlbsel, int sesel, struct kvm_book3e_206_tlb_entry *stlbe)
{
	u32 mas0;

	if (tlbsel == 0) {
		mas0 = get_host_mas0(stlbe->mas2);
		__write_host_tlbe(stlbe, mas0);
	} else {
		__write_host_tlbe(stlbe,
				  MAS0_TLBSEL(1) |
				  MAS0_ESEL(to_htlb1_esel(sesel)));
	}
}

/* sesel is for tlb1 only */
static void write_stlbe(struct kvmppc_vcpu_e500 *vcpu_e500,
			struct kvm_book3e_206_tlb_entry *gtlbe,
			struct kvm_book3e_206_tlb_entry *stlbe,
			int stlbsel, int sesel)
{
	int stid;

	preempt_disable();
	stid = kvmppc_e500_get_tlb_stid(&vcpu_e500->vcpu, gtlbe);

	stlbe->mas1 |= MAS1_TID(stid);
	write_host_tlbe(vcpu_e500, stlbsel, sesel, stlbe);
	preempt_enable();
}

#ifdef CONFIG_KVM_E500V2
/* XXX should be a hook in the gva2hpa translation */
void kvmppc_map_magic(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct kvm_book3e_206_tlb_entry magic;
	ulong shared_page = ((ulong)vcpu->arch.shared) & PAGE_MASK;
	unsigned int stid;
	pfn_t pfn;

	pfn = (pfn_t)virt_to_phys((void *)shared_page) >> PAGE_SHIFT;
	get_page(pfn_to_page(pfn));

	preempt_disable();
	stid = kvmppc_e500_get_sid(vcpu_e500, 0, 0, 0, 0);

	magic.mas1 = MAS1_VALID | MAS1_TS | MAS1_TID(stid) |
		     MAS1_TSIZE(BOOK3E_PAGESZ_4K);
	magic.mas2 = vcpu->arch.magic_page_ea | MAS2_M;
	magic.mas7_3 = ((u64)pfn << PAGE_SHIFT) |
		       MAS3_SW | MAS3_SR | MAS3_UW | MAS3_UR;
	magic.mas8 = 0;

	__write_host_tlbe(&magic, MAS0_TLBSEL(1) | MAS0_ESEL(tlbcam_index));
	preempt_enable();
}
#endif

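/*
 * Invalidate the host shadow entries backing one guest TLB entry.  A
 * guest TLB1 entry may be backed by several host entries; those are
 * tracked in the g2h bitmap and torn down individually.  Anything else
 * is backed by at most one host entry per shadow pid.
 */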
void inval_gtlbe_on_host(struct kvmppc_vcpu_e500 *vcpu_e500, int tlbsel,
			 int esel)
{
	struct kvm_book3e_206_tlb_entry *gtlbe =
		get_entry(vcpu_e500, tlbsel, esel);
	struct tlbe_ref *ref = &vcpu_e500->gtlb_priv[tlbsel][esel].ref;

	/* Don't bother with unmapped entries */
	if (!(ref->flags & E500_TLB_VALID))
		return;

	if (tlbsel == 1 && ref->flags & E500_TLB_BITMAP) {
		u64 tmp = vcpu_e500->g2h_tlb1_map[esel];
		int hw_tlb_indx;
		unsigned long flags;

		local_irq_save(flags);
		while (tmp) {
			hw_tlb_indx = __ilog2_u64(tmp & -tmp);
			mtspr(SPRN_MAS0,
			      MAS0_TLBSEL(1) |
			      MAS0_ESEL(to_htlb1_esel(hw_tlb_indx)));
			mtspr(SPRN_MAS1, 0);
			asm volatile("tlbwe");
			vcpu_e500->h2g_tlb1_rmap[hw_tlb_indx] = 0;
			tmp &= tmp - 1;
		}
		mb();
		vcpu_e500->g2h_tlb1_map[esel] = 0;
		ref->flags &= ~(E500_TLB_BITMAP | E500_TLB_VALID);
		local_irq_restore(flags);

		return;
	}

	/* Guest tlbe is backed by at most one host tlbe per shadow pid. */
	kvmppc_e500_tlbil_one(vcpu_e500, gtlbe);

	/* Mark the TLB as not backed by the host anymore */
	ref->flags &= ~E500_TLB_VALID;
}

static inline int tlbe_is_writable(struct kvm_book3e_206_tlb_entry *tlbe)
{
	return tlbe->mas7_3 & (MAS3_SW|MAS3_UW);
}

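/*
 * Record the host pfn backing a guest TLB entry.  Writable mappings
 * dirty the page up front, since the guest can later write through the
 * shadow TLB without faulting.
 */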
static inline void kvmppc_e500_ref_setup(struct tlbe_ref *ref,
					 struct kvm_book3e_206_tlb_entry *gtlbe,
					 pfn_t pfn)
{
	ref->pfn = pfn;
	ref->flags = E500_TLB_VALID;

	if (tlbe_is_writable(gtlbe))
		kvm_set_pfn_dirty(pfn);
}

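/*
 * Invalidate a ref, tracing the release.  The page reference itself was
 * already dropped when the mapping was created.
 */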
static inline void kvmppc_e500_ref_release(struct tlbe_ref *ref)
{
	if (ref->flags & E500_TLB_VALID) {
		trace_kvm_booke206_ref_release(ref->pfn, ref->flags);
		ref->flags = 0;
	}
}

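/* Reset the guest-to-host and host-to-guest TLB1 mapping tables. */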
void clear_tlb1_bitmap(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	if (vcpu_e500->g2h_tlb1_map)
		memset(vcpu_e500->g2h_tlb1_map, 0,
		       sizeof(u64) * vcpu_e500->gtlb_params[1].entries);
	if (vcpu_e500->h2g_tlb1_rmap)
		memset(vcpu_e500->h2g_tlb1_rmap, 0,
		       sizeof(unsigned int) * host_tlb_params[1].entries);
}

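/* Release the refs backing all guest TLB0 entries. */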
static void clear_tlb_privs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int tlbsel = 0;
	int i;

	for (i = 0; i < vcpu_e500->gtlb_params[tlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->gtlb_priv[tlbsel][i].ref;
		kvmppc_e500_ref_release(ref);
	}
}

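/*
 * Invalidate all of this vcpu's shadow mappings in the host TLB and
 * release the refs backing them; entries are faulted back in on demand.
 */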
void clear_tlb_refs(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	int stlbsel = 1;
	int i;

	kvmppc_e500_tlbil_all(vcpu_e500);

	for (i = 0; i < host_tlb_params[stlbsel].entries; i++) {
		struct tlbe_ref *ref =
			&vcpu_e500->tlb_refs[stlbsel][i];
		kvmppc_e500_ref_release(ref);
	}

	clear_tlb_privs(vcpu_e500);
}

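/* Drop all shadow TLB state for this vcpu. */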
void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	clear_tlb_refs(vcpu_e500);
	clear_tlb1_bitmap(vcpu_e500);
}

/* TID must be supplied by the caller */
static void kvmppc_e500_setup_stlbe(
	struct kvm_vcpu *vcpu,
	struct kvm_book3e_206_tlb_entry *gtlbe,
	int tsize, struct tlbe_ref *ref, u64 gvaddr,
	struct kvm_book3e_206_tlb_entry *stlbe)
{
	pfn_t pfn = ref->pfn;
	u32 pr = vcpu->arch.shared->msr & MSR_PR;

	BUG_ON(!(ref->flags & E500_TLB_VALID));

	/* Force IPROT=0 for all guest mappings. */
	stlbe->mas1 = MAS1_TSIZE(tsize) | get_tlb_sts(gtlbe) | MAS1_VALID;
	stlbe->mas2 = (gvaddr & MAS2_EPN) |
		      e500_shadow_mas2_attrib(gtlbe->mas2, pr);
	stlbe->mas7_3 = ((u64)pfn << PAGE_SHIFT) |
			e500_shadow_mas3_attrib(gtlbe->mas7_3, pr);

#ifdef CONFIG_KVM_BOOKE_HV
	stlbe->mas8 = MAS8_TGS | vcpu->kvm->arch.lpid;
#endif
}

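/*
 * Map a guest TLB entry into the host TLB: translate the gfn to a host
 * pfn, pick the largest page size that both the guest entry and the host
 * backing (PFNMAP region, hugetlb or normal pages) allow, and fill in
 * the shadow entry.
 */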
static inline int kvmppc_e500_shadow_map(struct kvmppc_vcpu_e500 *vcpu_e500,
	u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
	int tlbsel, struct kvm_book3e_206_tlb_entry *stlbe,
	struct tlbe_ref *ref)
{
	struct kvm_memory_slot *slot;
	unsigned long pfn = 0; /* silence GCC warning */
	unsigned long hva;
	int pfnmap = 0;
	int tsize = BOOK3E_PAGESZ_4K;

	/*
	 * Translate guest physical to true physical, acquiring
	 * a page reference if it is normal, non-reserved memory.
	 *
	 * gfn_to_memslot() must succeed because otherwise we wouldn't
	 * have gotten this far.  Eventually we should just pass the slot
	 * pointer through from the first lookup.
	 */
	slot = gfn_to_memslot(vcpu_e500->vcpu.kvm, gfn);
	hva = gfn_to_hva_memslot(slot, gfn);

	if (tlbsel == 1) {
		struct vm_area_struct *vma;
		down_read(&current->mm->mmap_sem);

		vma = find_vma(current->mm, hva);
		if (vma && hva >= vma->vm_start &&
		    (vma->vm_flags & VM_PFNMAP)) {
			/*
			 * This VMA is a physically contiguous region (e.g.
			 * /dev/mem) that bypasses normal Linux page
			 * management.  Find the overlap between the
			 * vma and the memslot.
			 */

			unsigned long start, end;
			unsigned long slot_start, slot_end;

			pfnmap = 1;

			start = vma->vm_pgoff;
			end = start +
			      ((vma->vm_end - vma->vm_start) >> PAGE_SHIFT);

			pfn = start + ((hva - vma->vm_start) >> PAGE_SHIFT);

			slot_start = pfn - (gfn - slot->base_gfn);
			slot_end = slot_start + slot->npages;

			if (start < slot_start)
				start = slot_start;
			if (end > slot_end)
				end = slot_end;

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);

			/*
			 * Now find the largest tsize (up to what the guest
			 * requested) that will cover gfn, stay within the
			 * range, and for which gfn and pfn are mutually
			 * aligned.
			 */

			for (; tsize > BOOK3E_PAGESZ_4K; tsize -= 2) {
				unsigned long gfn_start, gfn_end, tsize_pages;
				tsize_pages = 1 << (tsize - 2);

				gfn_start = gfn & ~(tsize_pages - 1);
				gfn_end = gfn_start + tsize_pages;

				if (gfn_start + pfn - gfn < start)
					continue;
				if (gfn_end + pfn - gfn > end)
					continue;
				if ((gfn & (tsize_pages - 1)) !=
				    (pfn & (tsize_pages - 1)))
					continue;

				gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
				pfn &= ~(tsize_pages - 1);
				break;
			}
		} else if (vma && hva >= vma->vm_start &&
			   (vma->vm_flags & VM_HUGETLB)) {
			unsigned long psize = vma_kernel_pagesize(vma);

			tsize = (gtlbe->mas1 & MAS1_TSIZE_MASK) >>
				MAS1_TSIZE_SHIFT;

			/*
			 * Take the largest page size that satisfies both host
			 * and guest mapping
			 */
			tsize = min(__ilog2(psize) - 10, tsize);

			/*
			 * e500 doesn't implement the lowest tsize bit,
			 * or 1K pages.
			 */
			tsize = max(BOOK3E_PAGESZ_4K, tsize & ~1);
		}

		up_read(&current->mm->mmap_sem);
	}

	if (likely(!pfnmap)) {
		unsigned long tsize_pages = 1 << (tsize + 10 - PAGE_SHIFT);
		pfn = gfn_to_pfn_memslot(slot, gfn);
		if (is_error_noslot_pfn(pfn)) {
			printk(KERN_ERR "Couldn't get real page for gfn %lx!\n",
			       (long)gfn);
			return -EINVAL;
		}

		/* Align guest and physical address to page map boundaries */
		pfn &= ~(tsize_pages - 1);
		gvaddr &= ~((tsize_pages << PAGE_SHIFT) - 1);
	}

	/* Drop old ref and setup new one. */
	kvmppc_e500_ref_release(ref);
	kvmppc_e500_ref_setup(ref, gtlbe, pfn);

	kvmppc_e500_setup_stlbe(&vcpu_e500->vcpu, gtlbe, tsize,
				ref, gvaddr, stlbe);

	/* Clear i-cache for new pages */
	kvmppc_mmu_flush_icache(pfn);

	/* Drop refcount on page, so that mmu notifiers can clear it */
	kvm_release_pfn_clean(pfn);

	return 0;
}

/* XXX only map the one-to-one case, for now use TLB0 */
static int kvmppc_e500_tlb0_map(struct kvmppc_vcpu_e500 *vcpu_e500, int esel,
				struct kvm_book3e_206_tlb_entry *stlbe)
{
	struct kvm_book3e_206_tlb_entry *gtlbe;
	struct tlbe_ref *ref;
	int stlbsel = 0;
	int sesel = 0;
	int r;

	gtlbe = get_entry(vcpu_e500, 0, esel);
	ref = &vcpu_e500->gtlb_priv[0][esel].ref;

	r = kvmppc_e500_shadow_map(vcpu_e500, get_tlb_eaddr(gtlbe),
				   get_tlb_raddr(gtlbe) >> PAGE_SHIFT,
				   gtlbe, 0, stlbe, ref);
	if (r)
		return r;

	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

	return 0;
}

/*
 * Caller must ensure that the specified guest TLB entry is safe to insert
 * into the shadow TLB.
 */
/* XXX for both one-to-one and one-to-many, for now use TLB1 */
static int kvmppc_e500_tlb1_map(struct kvmppc_vcpu_e500 *vcpu_e500,
		u64 gvaddr, gfn_t gfn, struct kvm_book3e_206_tlb_entry *gtlbe,
		struct kvm_book3e_206_tlb_entry *stlbe, int esel)
{
	struct tlbe_ref *ref;
	unsigned int sesel;
	int r;
	int stlbsel = 1;

	sesel = vcpu_e500->host_tlb1_nv++;

	if (unlikely(vcpu_e500->host_tlb1_nv >= tlb1_max_shadow_size()))
		vcpu_e500->host_tlb1_nv = 0;

	ref = &vcpu_e500->tlb_refs[1][sesel];
	r = kvmppc_e500_shadow_map(vcpu_e500, gvaddr, gfn, gtlbe, 1, stlbe,
				   ref);
	if (r)
		return r;

	vcpu_e500->g2h_tlb1_map[esel] |= (u64)1 << sesel;
	vcpu_e500->gtlb_priv[1][esel].ref.flags |= E500_TLB_BITMAP;
	if (vcpu_e500->h2g_tlb1_rmap[sesel]) {
		unsigned int idx = vcpu_e500->h2g_tlb1_rmap[sesel];
		vcpu_e500->g2h_tlb1_map[idx] &= ~(1ULL << sesel);
	}
	vcpu_e500->h2g_tlb1_rmap[sesel] = esel;

	write_stlbe(vcpu_e500, gtlbe, stlbe, stlbsel, sesel);

	return 0;
}

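/*
 * Handle a host TLB miss for which the guest TLB holds a valid
 * translation: install the shadow entry for the guest entry identified
 * by 'index'.
 */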
void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 eaddr, gpa_t gpaddr,
		    unsigned int index)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	struct tlbe_priv *priv;
	struct kvm_book3e_206_tlb_entry *gtlbe, stlbe;
	int tlbsel = tlbsel_of(index);
	int esel = esel_of(index);

	gtlbe = get_entry(vcpu_e500, tlbsel, esel);

	switch (tlbsel) {
	case 0:
		priv = &vcpu_e500->gtlb_priv[tlbsel][esel];

		/* Triggers after clear_tlb_refs or on initial mapping */
		if (!(priv->ref.flags & E500_TLB_VALID)) {
			kvmppc_e500_tlb0_map(vcpu_e500, esel, &stlbe);
		} else {
			kvmppc_e500_setup_stlbe(vcpu, gtlbe, BOOK3E_PAGESZ_4K,
						&priv->ref, eaddr, &stlbe);
			write_stlbe(vcpu_e500, gtlbe, &stlbe, 0, 0);
		}
		break;

	case 1: {
		gfn_t gfn = gpaddr >> PAGE_SHIFT;
		kvmppc_e500_tlb1_map(vcpu_e500, eaddr, gfn, gtlbe, &stlbe,
				     esel);
		break;
	}

	default:
		BUG();
		break;
	}
}

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere.  This is slow, but
	 * we are 100% sure that we catch the page that is to be unmapped.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

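/*
 * Read the host TLB geometry from the TLBnCFG registers and allocate
 * the per-vcpu ref and reverse-map arrays sized to match it.
 */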
int e500_mmu_host_init(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	host_tlb_params[0].entries = mfspr(SPRN_TLB0CFG) & TLBnCFG_N_ENTRY;
	host_tlb_params[1].entries = mfspr(SPRN_TLB1CFG) & TLBnCFG_N_ENTRY;

	/*
	 * This should never happen on real e500 hardware, but is
	 * architecturally possible -- e.g. in some weird nested
	 * virtualization case.
	 */
	if (host_tlb_params[0].entries == 0 ||
	    host_tlb_params[1].entries == 0) {
		pr_err("%s: need to know host tlb size\n", __func__);
		return -ENODEV;
	}

	host_tlb_params[0].ways = (mfspr(SPRN_TLB0CFG) & TLBnCFG_ASSOC) >>
				  TLBnCFG_ASSOC_SHIFT;
	host_tlb_params[1].ways = host_tlb_params[1].entries;

	if (!is_power_of_2(host_tlb_params[0].entries) ||
	    !is_power_of_2(host_tlb_params[0].ways) ||
	    host_tlb_params[0].entries < host_tlb_params[0].ways ||
	    host_tlb_params[0].ways == 0) {
		pr_err("%s: bad tlb0 host config: %u entries %u ways\n",
		       __func__, host_tlb_params[0].entries,
		       host_tlb_params[0].ways);
		return -ENODEV;
	}

	host_tlb_params[0].sets =
		host_tlb_params[0].entries / host_tlb_params[0].ways;
	host_tlb_params[1].sets = 1;

	vcpu_e500->tlb_refs[0] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[0].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[0])
		goto err;

	vcpu_e500->tlb_refs[1] =
		kzalloc(sizeof(struct tlbe_ref) * host_tlb_params[1].entries,
			GFP_KERNEL);
	if (!vcpu_e500->tlb_refs[1])
		goto err;

	vcpu_e500->h2g_tlb1_rmap = kzalloc(sizeof(unsigned int) *
					   host_tlb_params[1].entries,
					   GFP_KERNEL);
	if (!vcpu_e500->h2g_tlb1_rmap)
		goto err;

	return 0;

err:
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
	return -EINVAL;
}

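/* Free the per-vcpu state allocated by e500_mmu_host_init(). */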
void e500_mmu_host_uninit(struct kvmppc_vcpu_e500 *vcpu_e500)
{
	kfree(vcpu_e500->h2g_tlb1_rmap);
	kfree(vcpu_e500->tlb_refs[0]);
	kfree(vcpu_e500->tlb_refs[1]);
}