James Hogan | 403015b | 2016-06-09 14:19:10 +0100 | [diff] [blame] | 1 | /* |
| 2 | * This file is subject to the terms and conditions of the GNU General Public |
| 3 | * License. See the file "COPYING" in the main directory of this archive |
| 4 | * for more details. |
| 5 | * |
| 6 | * KVM/MIPS MMU handling in the KVM module. |
| 7 | * |
| 8 | * Copyright (C) 2012 MIPS Technologies, Inc. All rights reserved. |
| 9 | * Authors: Sanjay Lal <sanjayl@kymasys.com> |
| 10 | */ |
| 11 | |
James Hogan | 28cc5bd | 2016-07-08 11:53:22 +0100 | [diff] [blame] | 12 | #include <linux/highmem.h> |
James Hogan | 403015b | 2016-06-09 14:19:10 +0100 | [diff] [blame] | 13 | #include <linux/kvm_host.h> |
| 14 | #include <asm/mmu_context.h> |
James Hogan | a31b50d | 2016-12-16 15:57:00 +0000 | [diff] [blame] | 15 | #include <asm/pgalloc.h> |
James Hogan | 403015b | 2016-06-09 14:19:10 +0100 | [diff] [blame] | 16 | |
James Hogan | aba85929 | 2016-12-16 15:57:00 +0000 | [diff] [blame^] | 17 | static void mmu_free_memory_cache(struct kvm_mmu_memory_cache *mc) |
| 18 | { |
| 19 | while (mc->nobjs) |
| 20 | free_page((unsigned long)mc->objects[--mc->nobjs]); |
| 21 | } |
| 22 | |
| 23 | static void *mmu_memory_cache_alloc(struct kvm_mmu_memory_cache *mc) |
| 24 | { |
| 25 | void *p; |
| 26 | |
| 27 | BUG_ON(!mc || !mc->nobjs); |
| 28 | p = mc->objects[--mc->nobjs]; |
| 29 | return p; |
| 30 | } |
| 31 | |
/*
 * kvm_mmu_free_memory_caches() - Release a VCPU's MMU page allocation cache.
 * @vcpu:	VCPU whose cached page table pages should be freed.
 */
void kvm_mmu_free_memory_caches(struct kvm_vcpu *vcpu)
{
	mmu_free_memory_cache(&vcpu->arch.mmu_page_cache);
}
| 36 | |
James Hogan | 403015b | 2016-06-09 14:19:10 +0100 | [diff] [blame] | 37 | static u32 kvm_mips_get_kernel_asid(struct kvm_vcpu *vcpu) |
| 38 | { |
James Hogan | c550d53 | 2016-10-11 23:14:39 +0100 | [diff] [blame] | 39 | struct mm_struct *kern_mm = &vcpu->arch.guest_kernel_mm; |
James Hogan | 403015b | 2016-06-09 14:19:10 +0100 | [diff] [blame] | 40 | int cpu = smp_processor_id(); |
| 41 | |
James Hogan | c550d53 | 2016-10-11 23:14:39 +0100 | [diff] [blame] | 42 | return cpu_asid(cpu, kern_mm); |
James Hogan | 403015b | 2016-06-09 14:19:10 +0100 | [diff] [blame] | 43 | } |
| 44 | |
| 45 | static u32 kvm_mips_get_user_asid(struct kvm_vcpu *vcpu) |
| 46 | { |
James Hogan | c550d53 | 2016-10-11 23:14:39 +0100 | [diff] [blame] | 47 | struct mm_struct *user_mm = &vcpu->arch.guest_user_mm; |
James Hogan | 403015b | 2016-06-09 14:19:10 +0100 | [diff] [blame] | 48 | int cpu = smp_processor_id(); |
| 49 | |
James Hogan | c550d53 | 2016-10-11 23:14:39 +0100 | [diff] [blame] | 50 | return cpu_asid(cpu, user_mm); |
James Hogan | 403015b | 2016-06-09 14:19:10 +0100 | [diff] [blame] | 51 | } |
| 52 | |
/**
 * kvm_mips_walk_pgd() - Walk page table with optional allocation.
 * @pgd:	Page directory pointer.
 * @cache:	MMU page cache to allocate new page tables from, or NULL.
 * @addr:	Address to index page table using.
 *
 * Walk the page tables pointed to by @pgd to find the PTE corresponding to the
 * address @addr. If page tables don't exist for @addr, they will be created
 * from the MMU cache if @cache is not NULL.
 *
 * Returns: Pointer to pte_t corresponding to @addr.
 *	    NULL if a page table doesn't exist for @addr and !@cache.
 *	    NULL if a page table allocation failed.
 */
static pte_t *kvm_mips_walk_pgd(pgd_t *pgd, struct kvm_mmu_memory_cache *cache,
				unsigned long addr)
{
	pud_t *pud;
	pmd_t *pmd;

	pgd += pgd_index(addr);
	if (pgd_none(*pgd)) {
		/* Not used on MIPS yet */
		BUG();
		return NULL;
	}
	pud = pud_offset(pgd, addr);
	if (pud_none(*pud)) {
		pmd_t *new_pmd;

		/* Missing PMD table: allocate one or bail out */
		if (!cache)
			return NULL;
		new_pmd = mmu_memory_cache_alloc(cache);
		/* Initialise every new PMD entry to the invalid PTE table */
		pmd_init((unsigned long)new_pmd,
			 (unsigned long)invalid_pte_table);
		pud_populate(NULL, pud, new_pmd);
	}
	pmd = pmd_offset(pud, addr);
	if (pmd_none(*pmd)) {
		pte_t *new_pte;

		/* Missing last-level table: allocate one or bail out */
		if (!cache)
			return NULL;
		new_pte = mmu_memory_cache_alloc(cache);
		clear_page(new_pte);
		pmd_populate_kernel(NULL, pmd, new_pte);
	}
	return pte_offset(pmd, addr);
}
| 102 | |
/*
 * kvm_mips_map_page() - Resolve and record the host pfn backing a guest frame.
 * @kvm:	KVM pointer.
 * @gfn:	Guest frame number to map.
 *
 * Look up the host pfn for @gfn and store it in the guest physical address
 * map (guest_pmap). Does nothing if @gfn is already mapped. The caller is
 * expected to have range-checked @gfn against guest_pmap_npages.
 *
 * Returns:	0 on success or if already mapped.
 *		-EFAULT if no host page backs @gfn.
 */
static int kvm_mips_map_page(struct kvm *kvm, gfn_t gfn)
{
	int srcu_idx, err = 0;
	kvm_pfn_t pfn;

	/* Fast path: already resolved */
	if (kvm->arch.guest_pmap[gfn] != KVM_INVALID_PAGE)
		return 0;

	/* gfn_to_pfn() walks the memslots, which are protected by kvm->srcu */
	srcu_idx = srcu_read_lock(&kvm->srcu);
	pfn = gfn_to_pfn(kvm, gfn);

	if (is_error_noslot_pfn(pfn)) {
		kvm_err("Couldn't get pfn for gfn %#llx!\n", gfn);
		err = -EFAULT;
		goto out;
	}

	kvm->arch.guest_pmap[gfn] = pfn;
out:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return err;
}
| 125 | |
/*
 * kvm_mips_translate_guest_kseg0_to_hpa() - Translate guest KSEG0 addresses
 * to Host PA.
 * @vcpu:	VCPU pointer.
 * @gva:	Guest virtual address in guest KSEG0.
 *
 * Returns:	Host physical address corresponding to @gva.
 *		KVM_INVALID_PAGE if @gva is not in KSEG0 or out of range.
 *		KVM_INVALID_ADDR if the page could not be mapped.
 */
unsigned long kvm_mips_translate_guest_kseg0_to_hpa(struct kvm_vcpu *vcpu,
						    unsigned long gva)
{
	gfn_t gfn;
	/* Byte offset within the page, re-applied after translation */
	unsigned long offset = gva & ~PAGE_MASK;
	struct kvm *kvm = vcpu->kvm;

	if (KVM_GUEST_KSEGX(gva) != KVM_GUEST_KSEG0) {
		kvm_err("%s/%p: Invalid gva: %#lx\n", __func__,
			__builtin_return_address(0), gva);
		return KVM_INVALID_PAGE;
	}

	gfn = (KVM_GUEST_CPHYSADDR(gva) >> PAGE_SHIFT);

	if (gfn >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, GVA: %#lx\n", __func__, gfn,
			gva);
		return KVM_INVALID_PAGE;
	}

	/*
	 * NOTE(review): this error path returns KVM_INVALID_ADDR while the
	 * ones above return KVM_INVALID_PAGE — confirm callers handle both.
	 */
	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return KVM_INVALID_ADDR;

	return (kvm->arch.guest_pmap[gfn] << PAGE_SHIFT) + offset;
}
| 153 | |
/*
 * kvm_trap_emul_invalidate_gva() - Invalidate the GVA PTE pair for an address.
 * @vcpu:	VCPU pointer.
 * @addr:	Guest virtual address to invalidate.
 * @user:	Also invalidate the guest user page tables if true.
 *
 * Zero the even/odd pair of PTEs covering @addr in the guest kernel GVA page
 * tables, and optionally also in the guest user GVA page tables. The tables
 * are walked without allocation (@cache == NULL), so levels that don't exist
 * are simply skipped.
 */
void kvm_trap_emul_invalidate_gva(struct kvm_vcpu *vcpu, unsigned long addr,
				  bool user)
{
	pgd_t *pgdp;
	pte_t *ptep;

	/* Align to the base of the even/odd page pair */
	addr &= PAGE_MASK << 1;

	pgdp = vcpu->arch.guest_kernel_mm.pgd;
	ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
	if (ptep) {
		/* Clear both halves of the pair */
		ptep[0] = pfn_pte(0, __pgprot(0));
		ptep[1] = pfn_pte(0, __pgprot(0));
	}

	if (user) {
		pgdp = vcpu->arch.guest_user_mm.pgd;
		ptep = kvm_mips_walk_pgd(pgdp, NULL, addr);
		if (ptep) {
			ptep[0] = pfn_pte(0, __pgprot(0));
			ptep[1] = pfn_pte(0, __pgprot(0));
		}
	}
}
| 178 | |
/*
 * kvm_mips_flush_gva_{pte,pmd,pud,pgd,pt}.
 * Flush a range of guest physical address space from the VM's GPA page tables.
 */

/*
 * kvm_mips_flush_gva_pte() - Flush a GVA range from a last-level page table.
 * @pte:	Last-level page table to flush.
 * @start_gva:	First GVA in the range (inclusive).
 * @end_gva:	Last GVA in the range (inclusive).
 *
 * Returns:	true if the range covers the whole table, so the caller may
 *		clear its upper-level entry and free the table.
 *		false if only some entries were cleared.
 */
static bool kvm_mips_flush_gva_pte(pte_t *pte, unsigned long start_gva,
				   unsigned long end_gva)
{
	int i_min = __pte_offset(start_gva);
	int i_max = __pte_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PTE - 1);
	int i;

	/*
	 * There's no freeing to do, so there's no point clearing individual
	 * entries unless only part of the last level page table needs flushing.
	 */
	if (safe_to_remove)
		return true;

	for (i = i_min; i <= i_max; ++i) {
		if (!pte_present(pte[i]))
			continue;

		set_pte(pte + i, __pte(0));
	}
	return false;
}
| 207 | |
/*
 * kvm_mips_flush_gva_pmd() - Flush a GVA range from a PMD-level table.
 * @pmd:	PMD table to flush.
 * @start_gva:	First GVA in the range (inclusive).
 * @end_gva:	Last GVA in the range (inclusive).
 *
 * Recurses into each present last-level table in the range, freeing tables
 * that become fully flushed.
 *
 * Returns:	true if every entry in the range was removed and the range
 *		covers the whole table, so the caller may free it.
 */
static bool kvm_mips_flush_gva_pmd(pmd_t *pmd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pte_t *pte;
	unsigned long end = ~0ul;
	int i_min = __pmd_offset(start_gva);
	int i_max = __pmd_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PMD - 1);
	int i;

	/* Only the first table keeps a non-zero start; the last keeps end_gva */
	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pmd_present(pmd[i]))
			continue;

		pte = pte_offset(pmd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pte(pte, start_gva, end)) {
			/* Whole last-level table flushed: unlink and free it */
			pmd_clear(pmd + i);
			pte_free_kernel(NULL, pte);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}
| 235 | |
/*
 * kvm_mips_flush_gva_pud() - Flush a GVA range from a PUD-level table.
 * @pud:	PUD table to flush.
 * @start_gva:	First GVA in the range (inclusive).
 * @end_gva:	Last GVA in the range (inclusive).
 *
 * Recurses into each present PMD table in the range, freeing tables that
 * become fully flushed.
 *
 * Returns:	true if every entry in the range was removed and the range
 *		covers the whole table, so the caller may free it.
 */
static bool kvm_mips_flush_gva_pud(pud_t *pud, unsigned long start_gva,
				   unsigned long end_gva)
{
	pmd_t *pmd;
	unsigned long end = ~0ul;
	int i_min = __pud_offset(start_gva);
	int i_max = __pud_offset(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PUD - 1);
	int i;

	/* Only the first table keeps a non-zero start; the last keeps end_gva */
	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pud_present(pud[i]))
			continue;

		pmd = pmd_offset(pud + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pmd(pmd, start_gva, end)) {
			/* Whole PMD table flushed: unlink and free it */
			pud_clear(pud + i);
			pmd_free(NULL, pmd);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}
| 263 | |
/*
 * kvm_mips_flush_gva_pgd() - Flush a GVA range from a page global directory.
 * @pgd:	Page global directory to flush.
 * @start_gva:	First GVA in the range (inclusive).
 * @end_gva:	Last GVA in the range (inclusive).
 *
 * Recurses into each present PUD table in the range, freeing tables that
 * become fully flushed.
 *
 * Returns:	true if every entry in the range was removed and the range
 *		covers the whole directory.
 */
static bool kvm_mips_flush_gva_pgd(pgd_t *pgd, unsigned long start_gva,
				   unsigned long end_gva)
{
	pud_t *pud;
	unsigned long end = ~0ul;
	int i_min = pgd_index(start_gva);
	int i_max = pgd_index(end_gva);
	bool safe_to_remove = (i_min == 0 && i_max == PTRS_PER_PGD - 1);
	int i;

	/* Only the first table keeps a non-zero start; the last keeps end_gva */
	for (i = i_min; i <= i_max; ++i, start_gva = 0) {
		if (!pgd_present(pgd[i]))
			continue;

		pud = pud_offset(pgd + i, 0);
		if (i == i_max)
			end = end_gva;

		if (kvm_mips_flush_gva_pud(pud, start_gva, end)) {
			/* Whole PUD table flushed: unlink and free it */
			pgd_clear(pgd + i);
			pud_free(NULL, pud);
		} else {
			safe_to_remove = false;
		}
	}
	return safe_to_remove;
}
| 291 | |
| 292 | void kvm_mips_flush_gva_pt(pgd_t *pgd, enum kvm_mips_flush flags) |
| 293 | { |
| 294 | if (flags & KMF_GPA) { |
| 295 | /* all of guest virtual address space could be affected */ |
| 296 | if (flags & KMF_KERN) |
| 297 | /* useg, kseg0, seg2/3 */ |
| 298 | kvm_mips_flush_gva_pgd(pgd, 0, 0x7fffffff); |
| 299 | else |
| 300 | /* useg */ |
| 301 | kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); |
| 302 | } else { |
| 303 | /* useg */ |
| 304 | kvm_mips_flush_gva_pgd(pgd, 0, 0x3fffffff); |
| 305 | |
| 306 | /* kseg2/3 */ |
| 307 | if (flags & KMF_KERN) |
| 308 | kvm_mips_flush_gva_pgd(pgd, 0x60000000, 0x7fffffff); |
| 309 | } |
| 310 | } |
| 311 | |
/*
 * kvm_mips_handle_kseg0_tlb_fault() - Map a guest KSEG0 page pair into the
 * host TLB.
 * @badvaddr:	Faulting guest virtual address (must be in guest KSEG0).
 * @vcpu:	VCPU pointer.
 *
 * Resolve the even/odd guest physical page pair containing @badvaddr and
 * write a matching entry pair into the host TLB, marked dirty and valid.
 *
 * Returns:	Result of kvm_mips_host_tlb_write(), or -1 on error.
 *
 * XXXKYMA: Must be called with interrupts disabled
 */
int kvm_mips_handle_kseg0_tlb_fault(unsigned long badvaddr,
				    struct kvm_vcpu *vcpu)
{
	gfn_t gfn;
	kvm_pfn_t pfn0, pfn1;
	unsigned long vaddr = 0;
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	const int flush_dcache_mask = 0;
	int ret;

	if (KVM_GUEST_KSEGX(badvaddr) != KVM_GUEST_KSEG0) {
		kvm_err("%s: Invalid BadVaddr: %#lx\n", __func__, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}

	gfn = (KVM_GUEST_CPHYSADDR(badvaddr) >> PAGE_SHIFT);
	/* Both halves of the even/odd pair must fit in the guest pmap */
	if ((gfn | 1) >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: %#llx, BadVaddr: %#lx\n", __func__,
			gfn, badvaddr);
		kvm_mips_dump_host_tlbs();
		return -1;
	}
	/* Align to the start of the even/odd page pair */
	vaddr = badvaddr & (PAGE_MASK << 1);

	if (kvm_mips_map_page(vcpu->kvm, gfn) < 0)
		return -1;

	/* Map the sibling page of the pair too */
	if (kvm_mips_map_page(vcpu->kvm, gfn ^ 0x1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn & ~0x1];
	pfn1 = kvm->arch.guest_pmap[gfn | 0x1];

	/* Build dirty+valid entries with the default cacheability */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		ENTRYLO_D | ENTRYLO_V;

	preempt_disable();
	entryhi = (vaddr | kvm_mips_get_kernel_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      flush_dcache_mask);
	preempt_enable();

	return ret;
}
| 363 | |
/*
 * kvm_mips_handle_mapped_seg_tlb_fault() - Fill the host TLB from a guest
 * TLB entry.
 * @vcpu:	VCPU pointer.
 * @tlb:	Guest TLB entry covering the faulting address.
 *
 * Map the guest physical pages referenced by @tlb into the host and write a
 * host TLB entry pair for them, combining host pfns with the D/V attributes
 * taken from the guest entry.
 *
 * Returns:	Result of kvm_mips_host_tlb_write(), or -1 on error.
 */
int kvm_mips_handle_mapped_seg_tlb_fault(struct kvm_vcpu *vcpu,
					 struct kvm_mips_tlb *tlb)
{
	unsigned long entryhi = 0, entrylo0 = 0, entrylo1 = 0;
	struct kvm *kvm = vcpu->kvm;
	kvm_pfn_t pfn0, pfn1;
	gfn_t gfn0, gfn1;
	long tlb_lo[2];
	int ret;

	/* Work on a local copy so the commpage override doesn't touch @tlb */
	tlb_lo[0] = tlb->tlb_lo[0];
	tlb_lo[1] = tlb->tlb_lo[1];

	/*
	 * The commpage address must not be mapped to anything else if the guest
	 * TLB contains entries nearby, or commpage accesses will break.
	 */
	if (!((tlb->tlb_hi ^ KVM_GUEST_COMMPAGE_ADDR) &
	      VPN2_MASK & (PAGE_MASK << 1)))
		tlb_lo[(KVM_GUEST_COMMPAGE_ADDR >> PAGE_SHIFT) & 1] = 0;

	gfn0 = mips3_tlbpfn_to_paddr(tlb_lo[0]) >> PAGE_SHIFT;
	gfn1 = mips3_tlbpfn_to_paddr(tlb_lo[1]) >> PAGE_SHIFT;
	if (gfn0 >= kvm->arch.guest_pmap_npages ||
	    gfn1 >= kvm->arch.guest_pmap_npages) {
		kvm_err("%s: Invalid gfn: [%#llx, %#llx], EHi: %#lx\n",
			__func__, gfn0, gfn1, tlb->tlb_hi);
		kvm_mips_dump_guest_tlbs(vcpu);
		return -1;
	}

	if (kvm_mips_map_page(kvm, gfn0) < 0)
		return -1;

	if (kvm_mips_map_page(kvm, gfn1) < 0)
		return -1;

	pfn0 = kvm->arch.guest_pmap[gfn0];
	pfn1 = kvm->arch.guest_pmap[gfn1];

	/* Get attributes from the Guest TLB */
	entrylo0 = mips3_paddr_to_tlbpfn(pfn0 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[0] & ENTRYLO_D) |
		(tlb_lo[0] & ENTRYLO_V);
	entrylo1 = mips3_paddr_to_tlbpfn(pfn1 << PAGE_SHIFT) |
		((_page_cachable_default >> _CACHE_SHIFT) << ENTRYLO_C_SHIFT) |
		(tlb_lo[1] & ENTRYLO_D) |
		(tlb_lo[1] & ENTRYLO_V);

	kvm_debug("@ %#lx tlb_lo0: 0x%08lx tlb_lo1: 0x%08lx\n", vcpu->arch.pc,
		  tlb->tlb_lo[0], tlb->tlb_lo[1]);

	preempt_disable();
	/* Pick the kernel or user ASID depending on current guest mode */
	entryhi = (tlb->tlb_hi & VPN2_MASK) | (KVM_GUEST_KERNEL_MODE(vcpu) ?
					       kvm_mips_get_kernel_asid(vcpu) :
					       kvm_mips_get_user_asid(vcpu));
	ret = kvm_mips_host_tlb_write(vcpu, entryhi, entrylo0, entrylo1,
				      tlb->tlb_mask);
	preempt_enable();

	return ret;
}
| 427 | |
/*
 * kvm_get_new_mmu_context() - Assign a fresh ASID to an mm on a CPU.
 * @mm:		mm_struct to receive the new ASID.
 * @cpu:	CPU the ASID is allocated on.
 * @vcpu:	VCPU pointer (unused in this function's body).
 *
 * Advance this CPU's ASID cache and assign the result to @mm. If the ASID
 * field wraps, all previously handed out ASIDs on this CPU become stale, so
 * the TLB is flushed (and the icache too on VTag icaches) to begin a new
 * ASID cycle.
 */
void kvm_get_new_mmu_context(struct mm_struct *mm, unsigned long cpu,
			     struct kvm_vcpu *vcpu)
{
	unsigned long asid = asid_cache(cpu);

	asid += cpu_asid_inc();
	if (!(asid & cpu_asid_mask(&cpu_data[cpu]))) {
		/* ASID field wrapped: every old ASID on this CPU is stale */
		if (cpu_has_vtag_icache)
			flush_icache_all();

		kvm_local_flush_tlb_all();      /* start new asid cycle */

		if (!asid)      /* fix version if needed */
			asid = asid_first_version(cpu);
	}

	cpu_context(cpu, mm) = asid_cache(cpu) = asid;
}
| 446 | |
| 447 | /** |
| 448 | * kvm_mips_migrate_count() - Migrate timer. |
| 449 | * @vcpu: Virtual CPU. |
| 450 | * |
| 451 | * Migrate CP0_Count hrtimer to the current CPU by cancelling and restarting it |
| 452 | * if it was running prior to being cancelled. |
| 453 | * |
| 454 | * Must be called when the VCPU is migrated to a different CPU to ensure that |
| 455 | * timer expiry during guest execution interrupts the guest and causes the |
| 456 | * interrupt to be delivered in a timely manner. |
| 457 | */ |
| 458 | static void kvm_mips_migrate_count(struct kvm_vcpu *vcpu) |
| 459 | { |
| 460 | if (hrtimer_cancel(&vcpu->arch.comparecount_timer)) |
| 461 | hrtimer_restart(&vcpu->arch.comparecount_timer); |
| 462 | } |
| 463 | |
/*
 * kvm_arch_vcpu_load() - Load VCPU state onto the current CPU.
 * @vcpu:	VCPU being scheduled in.
 * @cpu:	CPU it is being scheduled onto.
 *
 * Restore ASID once we are scheduled back after preemption. If the VCPU has
 * moved to a different CPU since it last ran, its count timer is migrated
 * here first.
 */
void kvm_arch_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	unsigned long flags;

	kvm_debug("%s: vcpu %p, cpu: %d\n", __func__, vcpu, cpu);

	/* No interrupts while switching guest context */
	local_irq_save(flags);

	if (vcpu->arch.last_sched_cpu != cpu) {
		kvm_debug("[%d->%d]KVM VCPU[%d] switch\n",
			  vcpu->arch.last_sched_cpu, cpu, vcpu->vcpu_id);
		/*
		 * Migrate the timer interrupt to the current CPU so that it
		 * always interrupts the guest and synchronously triggers a
		 * guest timer interrupt.
		 */
		kvm_mips_migrate_count(vcpu);
	}

	/* restore guest state to registers */
	kvm_mips_callbacks->vcpu_load(vcpu, cpu);

	local_irq_restore(flags);
}
| 489 | |
/*
 * kvm_arch_vcpu_put() - Save VCPU state when scheduled out.
 * @vcpu:	VCPU being scheduled out.
 *
 * ASID can change if another task is scheduled during preemption, so record
 * the CPU we last ran on (for migration detection in kvm_arch_vcpu_load())
 * and let the implementation callback save guest register state.
 */
void kvm_arch_vcpu_put(struct kvm_vcpu *vcpu)
{
	unsigned long flags;
	int cpu;

	local_irq_save(flags);

	cpu = smp_processor_id();
	/* Remembered so vcpu_load can detect CPU migration */
	vcpu->arch.last_sched_cpu = cpu;

	/* save guest state in registers */
	kvm_mips_callbacks->vcpu_put(vcpu, cpu);

	local_irq_restore(flags);
}
| 506 | |
/*
 * kvm_get_inst() - Fetch a guest instruction word.
 * @opc:	Guest virtual address of the instruction.
 * @vcpu:	VCPU pointer.
 *
 * Read the instruction at @opc. For mapped segments (below KSEG0, or
 * KSEG2/3) the host TLB is primed from the guest TLB if needed, then the
 * word is read through the guest mapping with interrupts disabled. For
 * guest KSEG0 the address is translated to a host physical address and
 * read via a temporary atomic kernel mapping.
 *
 * Returns:	The instruction word on success.
 *		KVM_INVALID_INST if @opc cannot be resolved.
 */
u32 kvm_get_inst(u32 *opc, struct kvm_vcpu *vcpu)
{
	struct mips_coproc *cop0 = vcpu->arch.cop0;
	unsigned long paddr, flags, vpn2, asid;
	unsigned long va = (unsigned long)opc;
	void *vaddr;
	u32 inst;
	int index;

	if (KVM_GUEST_KSEGX(va) < KVM_GUEST_KSEG0 ||
	    KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG23) {
		/* Mapped segment: may need to refill the host TLB first */
		local_irq_save(flags);
		index = kvm_mips_host_tlb_lookup(vcpu, va);
		if (index >= 0) {
			/* Host TLB already maps it: read directly */
			inst = *(opc);
		} else {
			/* Look the address up in the guest TLB instead */
			vpn2 = va & VPN2_MASK;
			asid = kvm_read_c0_guest_entryhi(cop0) &
				KVM_ENTRYHI_ASID;
			index = kvm_mips_guest_tlb_lookup(vcpu, vpn2 | asid);
			if (index < 0) {
				kvm_err("%s: get_user_failed for %p, vcpu: %p, ASID: %#lx\n",
					__func__, opc, vcpu, read_c0_entryhi());
				kvm_mips_dump_host_tlbs();
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			/* Prime the host TLB from the guest entry, then read */
			if (kvm_mips_handle_mapped_seg_tlb_fault(vcpu,
						&vcpu->arch.guest_tlb[index])) {
				kvm_err("%s: handling mapped seg tlb fault failed for %p, index: %u, vcpu: %p, ASID: %#lx\n",
					__func__, opc, index, vcpu,
					read_c0_entryhi());
				kvm_mips_dump_guest_tlbs(vcpu);
				local_irq_restore(flags);
				return KVM_INVALID_INST;
			}
			inst = *(opc);
		}
		local_irq_restore(flags);
	} else if (KVM_GUEST_KSEGX(va) == KVM_GUEST_KSEG0) {
		/* Unmapped: translate to host PA and read via kmap_atomic */
		paddr = kvm_mips_translate_guest_kseg0_to_hpa(vcpu, va);
		vaddr = kmap_atomic(pfn_to_page(PHYS_PFN(paddr)));
		vaddr += paddr & ~PAGE_MASK;
		inst = *(u32 *)vaddr;
		kunmap_atomic(vaddr);
	} else {
		kvm_err("%s: illegal address: %p\n", __func__, opc);
		return KVM_INVALID_INST;
	}

	return inst;
}