/*
 * vtlb.c: guest virtual tlb handling module.
 * Copyright (c) 2004, Intel Corporation.
 *  Yaozu Dong (Eddie Dong) <Eddie.dong@intel.com>
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *
 * Copyright (c) 2007, Intel Corporation.
 *  Xuefei Xu (Anthony Xu) <anthony.xu@intel.com>
 *  Xiantao Zhang <xiantao.zhang@intel.com>
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc., 59 Temple
 * Place - Suite 330, Boston, MA 02111-1307 USA.
 *
 */

#include "vcpu.h"

#include <linux/rwsem.h>

#include <asm/tlb.h>

/*
 * Check to see if the address rid:va is translated by the TLB
 */

static int __is_tr_translated(struct thash_data *trp, u64 rid, u64 va)
{
        return ((trp->p) && (trp->rid == rid)
                && ((va - trp->vadr) < PSIZE(trp->ps)));
}

/*
 * Only for GUEST TR format.
 */
static int __is_tr_overlap(struct thash_data *trp, u64 rid, u64 sva, u64 eva)
{
        u64 sa1, ea1;

        if (!trp->p || trp->rid != rid)
                return 0;

        sa1 = trp->vadr;
        ea1 = sa1 + PSIZE(trp->ps) - 1;
        eva -= 1;
        if ((sva > ea1) || (sa1 > eva))
                return 0;
        else
                return 1;
}

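/*
 * Purge the translation for va (page size 2^ps) from the local
 * machine TLB with ptc.l.
 */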
void machine_tlb_purge(u64 va, u64 ps)
{
        ia64_ptcl(va, ps << 2);
}

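/*
 * Flush the entire local TLB with a ptc.e loop.  The base address,
 * counts and strides are cached in the vcpu arch state (presumably
 * from PAL's ptce info), so the loop walks every line of the machine
 * TLB.
 */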
void local_flush_tlb_all(void)
{
        int i, j;
        unsigned long flags, count0, count1;
        unsigned long stride0, stride1, addr;

        addr = current_vcpu->arch.ptce_base;
        count0 = current_vcpu->arch.ptce_count[0];
        count1 = current_vcpu->arch.ptce_count[1];
        stride0 = current_vcpu->arch.ptce_stride[0];
        stride1 = current_vcpu->arch.ptce_stride[1];

        local_irq_save(flags);
        for (i = 0; i < count0; ++i) {
                for (j = 0; j < count1; ++j) {
                        ia64_ptce(addr);
                        addr += stride1;
                }
                addr += stride0;
        }
        local_irq_restore(flags);
        ia64_srlz_i();          /* srlz.i implies srlz.d */
}

int vhpt_enabled(struct kvm_vcpu *vcpu, u64 vadr, enum vhpt_ref ref)
{
        union ia64_rr vrr;
        union ia64_pta vpta;
        struct ia64_psr vpsr;

        vpsr = *(struct ia64_psr *)&VCPU(vcpu, vpsr);
        vrr.val = vcpu_get_rr(vcpu, vadr);
        vpta.val = vcpu_get_pta(vcpu);

        if (vrr.ve & vpta.ve) {
                switch (ref) {
                case DATA_REF:
                case NA_REF:
                        return vpsr.dt;
                case INST_REF:
                        return vpsr.dt && vpsr.it && vpsr.ic;
                case RSE_REF:
                        return vpsr.dt && vpsr.rt;
                }
        }
        return 0;
}

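/*
 * Hash a guest address into the software VTLB/guest VHPT: the low
 * 8 bits of the RID and the low VPN bits select one of the
 * 2^(size - 5) 32-byte entries, while the next 16 RID bits and the
 * remaining VPN bits form the tag used to detect collisions.
 */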
struct thash_data *vsa_thash(union ia64_pta vpta, u64 va, u64 vrr, u64 *tag)
{
        u64 index, pfn, rid, pfn_bits;

        pfn_bits = vpta.size - 5 - 8;
        pfn = REGION_OFFSET(va) >> _REGION_PAGE_SIZE(vrr);
        rid = _REGION_ID(vrr);
        index = ((rid & 0xff) << pfn_bits) | (pfn & ((1UL << pfn_bits) - 1));
        *tag = ((rid >> 8) & 0xffff) | ((pfn >> pfn_bits) << 16);

        return (struct thash_data *)((vpta.base << PTA_BASE_SHIFT) +
                        (index << 5));
}

struct thash_data *__vtr_lookup(struct kvm_vcpu *vcpu, u64 va, int type)
{
        struct thash_data *trp;
        int i;
        u64 rid;

        rid = vcpu_get_rr(vcpu, va);
        rid = rid & RR_RID_MASK;
        if (type == D_TLB) {
                if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
                                        i < NDTRS; i++, trp++) {
                                if (__is_tr_translated(trp, rid, va))
                                        return trp;
                        }
                }
        } else {
                if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
                                        i < NITRS; i++, trp++) {
                                if (__is_tr_translated(trp, rid, va))
                                        return trp;
                        }
                }
        }

        return NULL;
}

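/*
 * Insert a translation into the machine VHPT.  The tag is set to
 * INVALID_TI_TAG and a memory fence is issued before the entry is
 * rewritten, so a concurrent hardware walker never matches a
 * half-updated entry; the valid tag is written last.
 */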
static void vhpt_insert(u64 pte, u64 itir, u64 ifa, u64 gpte)
{
        union ia64_rr rr;
        struct thash_data *head;
        unsigned long ps, gpaddr;

        ps = itir_ps(itir);

        gpaddr = ((gpte & _PAGE_PPN_MASK) >> ps << ps) |
                        (ifa & ((1UL << ps) - 1));

        rr.val = ia64_get_rr(ifa);
        head = (struct thash_data *)ia64_thash(ifa);
        head->etag = INVALID_TI_TAG;
        ia64_mf();
        head->page_flags = pte & ~PAGE_FLAGS_RV_MASK;
        head->itir = rr.ps << 2;
        head->etag = ia64_ttag(ifa);
        head->gpaddr = gpaddr;
}

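/*
 * Set the dirty-log bit for every host page backing the guest
 * mapping: a guest page of size 2^ps covers 2^(ps - PAGE_SHIFT) host
 * pages.  Testing each bit before setting it avoids an atomic
 * read-modify-write for bits that are already set.
 */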
void mark_pages_dirty(struct kvm_vcpu *v, u64 pte, u64 ps)
{
        u64 i, dirty_pages = 1;
        u64 base_gfn = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
        spinlock_t *lock = __kvm_va(v->arch.dirty_log_lock_pa);
        void *dirty_bitmap = (void *)KVM_MEM_DIRTY_LOG_BASE;

        dirty_pages <<= ps <= PAGE_SHIFT ? 0 : ps - PAGE_SHIFT;

        vmm_spin_lock(lock);
        for (i = 0; i < dirty_pages; i++) {
                /* avoid RMW */
                if (!test_bit(base_gfn + i, dirty_bitmap))
                        set_bit(base_gfn + i, dirty_bitmap);
        }
        vmm_spin_unlock(lock);
}

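/*
 * Install a guest translation on the machine side.  If the guest page
 * is at least as large as the machine page, the entry can live in the
 * machine VHPT; otherwise it is inserted directly into the TLB with
 * itc, which must run with PSR.ic cleared.
 */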
void thash_vhpt_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va, int type)
{
        u64 phy_pte, psr;
        union ia64_rr mrr;

        mrr.val = ia64_get_rr(va);
        phy_pte = translate_phy_pte(&pte, itir, va);

        if (itir_ps(itir) >= mrr.ps) {
                vhpt_insert(phy_pte, itir, va, pte);
        } else {
                phy_pte &= ~PAGE_FLAGS_RV_MASK;
                psr = ia64_clear_ic();
                ia64_itc(type, va, phy_pte, itir_ps(itir));
                paravirt_dv_serialize_data();
                ia64_set_psr(psr);
        }

        if (!(pte & VTLB_PTE_IO))
                mark_pages_dirty(v, pte, itir_ps(itir));
}

/*
 * vhpt lookup
 */
struct thash_data *vhpt_lookup(u64 va)
{
        struct thash_data *head;
        u64 tag;

        head = (struct thash_data *)ia64_thash(va);
        tag = ia64_ttag(va);
        if (head->etag == tag)
                return head;
        return NULL;
}

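/*
 * Read a PTE from the guest VHPT entry at iha using a speculative
 * load, so a miss does not fault the VMM: returns 1 if the load
 * produced a NaT, otherwise stores the low 53 bits of the PTE through
 * *pte and returns 0.  A guest DTR mapping covering iha, if present,
 * is first propagated to the machine side, presumably so the ld8.s
 * can succeed.
 */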
u64 guest_vhpt_lookup(u64 iha, u64 *pte)
{
        u64 ret;
        struct thash_data *data;

        data = __vtr_lookup(current_vcpu, iha, D_TLB);
        if (data != NULL)
                thash_vhpt_insert(current_vcpu, data->page_flags,
                        data->itir, iha, D_TLB);

        asm volatile ("rsm psr.ic|psr.i;;"
                        "srlz.d;;"
                        "ld8.s r9=[%1];;"
                        "tnat.nz p6,p7=r9;;"
                        "(p6) mov %0=1;"
                        "(p6) mov r9=r0;"
                        "(p7) extr.u r9=r9,0,53;;"
                        "(p7) mov %0=r0;"
                        "(p7) st8 [%2]=r9;;"
                        "ssm psr.ic;;"
                        "srlz.d;;"
                        /* "ssm psr.i;;" needs fixing once the vmm runs
                         * with interrupts enabled */
                        : "=r"(ret) : "r"(iha), "r"(pte) : "memory");

        return ret;
}

/*
 * purge software guest tlb
 */

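/*
 * psbits records which page sizes have been inserted into each
 * virtual region (see vtlb_insert), so the purge only probes the hash
 * for page sizes actually in use.
 */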
static void vtlb_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
        struct thash_data *cur;
        u64 start, curadr, size, psbits, tag, rr_ps, num;
        union ia64_rr vrr;
        struct thash_cb *hcb = &v->arch.vtlb;

        vrr.val = vcpu_get_rr(v, va);
        psbits = VMX(v, psbits[(va >> 61)]);
        start = va & ~((1UL << ps) - 1);
        while (psbits) {
                curadr = start;
                rr_ps = __ffs(psbits);
                psbits &= ~(1UL << rr_ps);
                num = 1UL << ((ps < rr_ps) ? 0 : (ps - rr_ps));
                size = PSIZE(rr_ps);
                vrr.ps = rr_ps;
                while (num) {
                        cur = vsa_thash(hcb->pta, curadr, vrr.val, &tag);
                        if (cur->etag == tag && cur->ps == rr_ps)
                                cur->etag = INVALID_TI_TAG;
                        curadr += size;
                        num--;
                }
        }
}

/*
 * purge VHPT and machine TLB
 */
static void vhpt_purge(struct kvm_vcpu *v, u64 va, u64 ps)
{
        struct thash_data *cur;
        u64 start, size, tag, num;
        union ia64_rr rr;

        start = va & ~((1UL << ps) - 1);
        rr.val = ia64_get_rr(va);
        size = PSIZE(rr.ps);
        num = 1UL << ((ps < rr.ps) ? 0 : (ps - rr.ps));
        while (num) {
                cur = (struct thash_data *)ia64_thash(start);
                tag = ia64_ttag(start);
                if (cur->etag == tag)
                        cur->etag = INVALID_TI_TAG;
                start += size;
                num--;
        }
        machine_tlb_purge(va, ps);
}

/*
 * Insert an entry into the hash TLB or VHPT.
 * NOTES:
 *  1: When inserting a VHPT entry into the thash, "va" must be an
 *     address covered by the inserted machine VHPT entry.
 *  2: The entry format is always TLB format.
 *  3: The caller must make sure the new entry does not overlap
 *     with any existing entry.
 */
void vtlb_insert(struct kvm_vcpu *v, u64 pte, u64 itir, u64 va)
{
        struct thash_data *head;
        union ia64_rr vrr;
        u64 tag;
        struct thash_cb *hcb = &v->arch.vtlb;

        vrr.val = vcpu_get_rr(v, va);
        vrr.ps = itir_ps(itir);
        VMX(v, psbits[va >> 61]) |= (1UL << vrr.ps);
        head = vsa_thash(hcb->pta, va, vrr.val, &tag);
        head->page_flags = pte;
        head->itir = itir;
        head->etag = tag;
}

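/*
 * Return the index of the first guest TR entry overlapping the range
 * [va, va + 2^ps), or -1 if none does.
 */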
int vtr_find_overlap(struct kvm_vcpu *vcpu, u64 va, u64 ps, int type)
{
        struct thash_data *trp;
        int i;
        u64 end, rid;

        rid = vcpu_get_rr(vcpu, va);
        rid = rid & RR_RID_MASK;
        end = va + PSIZE(ps);
        if (type == D_TLB) {
                if (vcpu_quick_region_check(vcpu->arch.dtr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.dtrs, i = 0;
                                        i < NDTRS; i++, trp++) {
                                if (__is_tr_overlap(trp, rid, va, end))
                                        return i;
                        }
                }
        } else {
                if (vcpu_quick_region_check(vcpu->arch.itr_regions, va)) {
                        for (trp = (struct thash_data *)&vcpu->arch.itrs, i = 0;
                                        i < NITRS; i++, trp++) {
                                if (__is_tr_overlap(trp, rid, va, end))
                                        return i;
                        }
                }
        }
        return -1;
}

/*
 * Purge entries in VTLB and VHPT
 */
void thash_purge_entries(struct kvm_vcpu *v, u64 va, u64 ps)
{
        if (vcpu_quick_region_check(v->arch.tc_regions, va))
                vtlb_purge(v, va, ps);
        vhpt_purge(v, va, ps);
}

void thash_purge_entries_remote(struct kvm_vcpu *v, u64 va, u64 ps)
{
        u64 old_va = va;

        va = REGION_OFFSET(va);
        if (vcpu_quick_region_check(v->arch.tc_regions, old_va))
                vtlb_purge(v, va, ps);
        vhpt_purge(v, va, ps);
}

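/*
 * Translate the guest-physical PTE to a machine PTE through the P2M
 * table.  For an I/O page (other than GPFN_PHYS_MMIO) there is no
 * machine page to map, so the PTE is flagged VTLB_PTE_IO and -1 is
 * returned.
 */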
u64 translate_phy_pte(u64 *pte, u64 itir, u64 va)
{
        u64 ps, ps_mask, paddr, maddr, io_mask;
        union pte_flags phy_pte;

        ps = itir_ps(itir);
        ps_mask = ~((1UL << ps) - 1);
        phy_pte.val = *pte;
        paddr = *pte;
        paddr = ((paddr & _PAGE_PPN_MASK) & ps_mask) | (va & ~ps_mask);
        maddr = kvm_get_mpt_entry(paddr >> PAGE_SHIFT);
        io_mask = maddr & GPFN_IO_MASK;
        if (io_mask && (io_mask != GPFN_PHYS_MMIO)) {
                *pte |= VTLB_PTE_IO;
                return -1;
        }
        maddr = ((maddr & _PAGE_PPN_MASK) & PAGE_MASK) |
                        (paddr & ~PAGE_MASK);
        phy_pte.ppn = maddr >> ARCH_PAGE_SHIFT;
        return phy_pte.val;
}

/*
 * Purge overlapping TCs and then insert the new entry to emulate itc ops.
 * Note: only TC entries can be purged and inserted this way.
 * Returns 1 if the translation targets MMIO.
 */
int thash_purge_and_insert(struct kvm_vcpu *v, u64 pte, u64 itir,
                                u64 ifa, int type)
{
        u64 ps;
        u64 phy_pte, io_mask, index;
        union ia64_rr vrr, mrr;
        int ret = 0;

        ps = itir_ps(itir);
        vrr.val = vcpu_get_rr(v, ifa);
        mrr.val = ia64_get_rr(ifa);

        index = (pte & _PAGE_PPN_MASK) >> PAGE_SHIFT;
        io_mask = kvm_get_mpt_entry(index) & GPFN_IO_MASK;
        phy_pte = translate_phy_pte(&pte, itir, ifa);

        /* Ensure the WB attribute if the pte refers to a normal memory
         * page, which vga acceleration requires since qemu maps the
         * shared vram buffer with WB.
         */
        if (!(pte & VTLB_PTE_IO) && ((pte & _PAGE_MA_MASK) != _PAGE_MA_NAT) &&
                        io_mask != GPFN_PHYS_MMIO) {
                pte &= ~_PAGE_MA_MASK;
                phy_pte &= ~_PAGE_MA_MASK;
        }

        if (pte & VTLB_PTE_IO)
                ret = 1;

        vtlb_purge(v, ifa, ps);
        vhpt_purge(v, ifa, ps);

        if (ps == mrr.ps) {
                if (!(pte & VTLB_PTE_IO)) {
                        vhpt_insert(phy_pte, itir, ifa, pte);
                } else {
                        vtlb_insert(v, pte, itir, ifa);
                        vcpu_quick_region_set(VMX(v, tc_regions), ifa);
                }
        } else if (ps > mrr.ps) {
                vtlb_insert(v, pte, itir, ifa);
                vcpu_quick_region_set(VMX(v, tc_regions), ifa);
                if (!(pte & VTLB_PTE_IO))
                        vhpt_insert(phy_pte, itir, ifa, pte);
        } else {
                u64 psr;

                phy_pte &= ~PAGE_FLAGS_RV_MASK;
                psr = ia64_clear_ic();
                ia64_itc(type, ifa, phy_pte, ps);
                paravirt_dv_serialize_data();
                ia64_set_psr(psr);
        }
        if (!(pte & VTLB_PTE_IO))
                mark_pages_dirty(v, pte, ps);

        return ret;
}

/*
 * Purge all TC and VHPT entries, including those in the hash table.
 */
void thash_purge_all(struct kvm_vcpu *v)
{
        int i;
        struct thash_data *head;
        struct thash_cb *vtlb, *vhpt;

        vtlb = &v->arch.vtlb;
        vhpt = &v->arch.vhpt;

        for (i = 0; i < 8; i++)
                VMX(v, psbits[i]) = 0;

        head = vtlb->hash;
        for (i = 0; i < vtlb->num; i++) {
                head->page_flags = 0;
                head->etag = INVALID_TI_TAG;
                head->itir = 0;
                head->next = 0;
                head++;
        }

        head = vhpt->hash;
        for (i = 0; i < vhpt->num; i++) {
                head->page_flags = 0;
                head->etag = INVALID_TI_TAG;
                head->itir = 0;
                head->next = 0;
                head++;
        }

        local_flush_tlb_all();
}

/*
 * Look up the hash table and its collision chain to find an entry
 * covering the address rid:va.
 *
 * INPUT:
 *  entries are in TLB format for both the VHPT and the VTLB.
 */
struct thash_data *vtlb_lookup(struct kvm_vcpu *v, u64 va, int is_data)
{
        struct thash_data *cch;
        u64 psbits, ps, tag;
        union ia64_rr vrr;

        struct thash_cb *hcb = &v->arch.vtlb;

        cch = __vtr_lookup(v, va, is_data);
        if (cch)
                return cch;

        if (vcpu_quick_region_check(v->arch.tc_regions, va) == 0)
                return NULL;

        psbits = VMX(v, psbits[(va >> 61)]);
        vrr.val = vcpu_get_rr(v, va);
        while (psbits) {
                ps = __ffs(psbits);
                psbits &= ~(1UL << ps);
                vrr.ps = ps;
                cch = vsa_thash(hcb->pta, va, vrr.val, &tag);
                if (cch->etag == tag && cch->ps == ps)
                        return cch;
        }

        return NULL;
}

/*
 * Initialize internal control data before service.
 */
void thash_init(struct thash_cb *hcb, u64 sz)
{
        int i;
        struct thash_data *head;

        hcb->pta.val = (unsigned long)hcb->hash;
        hcb->pta.vf = 1;
        hcb->pta.ve = 1;
        hcb->pta.size = sz;
        head = hcb->hash;
        for (i = 0; i < hcb->num; i++) {
                head->page_flags = 0;
                head->itir = 0;
                head->etag = INVALID_TI_TAG;
                head->next = 0;
                head++;
        }
}

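/*
 * The P2M table at KVM_P2M_BASE is a flat array of u64 entries
 * indexed by guest page frame number; kvm_lookup_mpa extracts the
 * machine ppn from an entry and kvm_gpa_to_mpa restores the page
 * offset.
 */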
u64 kvm_get_mpt_entry(u64 gpfn)
{
        u64 *base = (u64 *)KVM_P2M_BASE;

        return *(base + gpfn);
}

u64 kvm_lookup_mpa(u64 gpfn)
{
        u64 maddr;

        maddr = kvm_get_mpt_entry(gpfn);
        return maddr & _PAGE_PPN_MASK;
}

u64 kvm_gpa_to_mpa(u64 gpa)
{
        u64 pte = kvm_lookup_mpa(gpa >> PAGE_SHIFT);

        return (pte >> PAGE_SHIFT << PAGE_SHIFT) | (gpa & ~PAGE_MASK);
}

/*
 * Fetch guest bundle code.
 * INPUT:
 *  gip: guest ip
 *  pbundle: used to return the fetched bundle.
 */
int fetch_code(struct kvm_vcpu *vcpu, u64 gip, IA64_BUNDLE *pbundle)
{
        u64 gpip = 0;   /* guest physical IP */
        u64 *vpa;
        struct thash_data *tlb;
        u64 maddr;

        if (!(VCPU(vcpu, vpsr) & IA64_PSR_IT)) {
                /* I-side physical mode */
                gpip = gip;
        } else {
                tlb = vtlb_lookup(vcpu, gip, I_TLB);
                if (tlb)
                        gpip = (tlb->ppn >> (tlb->ps - 12) << tlb->ps) |
                                (gip & (PSIZE(tlb->ps) - 1));
        }
        if (gpip) {
                maddr = kvm_gpa_to_mpa(gpip);
        } else {
                tlb = vhpt_lookup(gip);
                if (tlb == NULL) {
                        ia64_ptcl(gip, ARCH_PAGE_SHIFT << 2);
                        return IA64_FAULT;
                }
                maddr = (tlb->ppn >> (tlb->ps - 12) << tlb->ps)
                        | (gip & (PSIZE(tlb->ps) - 1));
        }
        vpa = (u64 *)__kvm_va(maddr);

        pbundle->i64[0] = *vpa++;
        pbundle->i64[1] = *vpa;

        return IA64_NO_FAULT;
}

void kvm_init_vhpt(struct kvm_vcpu *v)
{
        v->arch.vhpt.num = VHPT_NUM_ENTRIES;
        thash_init(&v->arch.vhpt, VHPT_SHIFT);
        ia64_set_pta(v->arch.vhpt.pta.val);
        /* Enable VHPT here? */
}

void kvm_init_vtlb(struct kvm_vcpu *v)
{
        v->arch.vtlb.num = VTLB_NUM_ENTRIES;
        thash_init(&v->arch.vtlb, VTLB_SHIFT);
}