/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

#include "book3s_hv_cma.h"

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63

/* The Power architecture requires the HPT to be at least 256kB */
#define PPC_MIN_HPT_ORDER	18
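/*
 * Throughout this file, "order" is log2 of the HPT size in bytes,
 * so order 18 corresponds to the minimum 256kB table.
 */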

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret);
static void kvmppc_rmap_reset(struct kvm *kvm);

long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	unsigned long hpt = 0;
	struct revmap_entry *rev;
	struct page *page = NULL;
	long order = KVM_DEFAULT_HPT_ORDER;

	if (htab_orderp) {
		order = *htab_orderp;
		if (order < PPC_MIN_HPT_ORDER)
			order = PPC_MIN_HPT_ORDER;
	}

	kvm->arch.hpt_cma_alloc = 0;
	VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
	page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
	if (page) {
		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
		kvm->arch.hpt_cma_alloc = 1;
	}

	/* Lastly try successively smaller sizes from the page allocator */
	while (!hpt && order > PPC_MIN_HPT_ORDER) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	if (!hpt)
		return -ENOMEM;

	kvm->arch.hpt_virt = hpt;
	kvm->arch.hpt_order = order;
	/* HPTEs are 2**4 bytes long */
	kvm->arch.hpt_npte = 1ul << (order - 4);
	/* 128 (2**7) bytes in each HPTEG */
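	/* hpt_mask reduces a hash value to an HPTEG index */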
	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;
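	/*
	 * SDR1 takes the real address of the HPT in its high bits and
	 * the HTABSIZE field (log2 of the size minus 18, i.e. relative
	 * to the 256kB architected minimum) in its low bits.
	 */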
	kvm->arch.sdr1 = __pa(hpt) | (order - 18);

	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
		hpt, order, kvm->arch.lpid);

	if (htab_orderp)
		*htab_orderp = order;
	return 0;

 out_freehpt:
	if (kvm->arch.hpt_cma_alloc)
		kvm_release_hpt(page, 1 << (order - PAGE_SHIFT));
	else
		free_pages(hpt, order - PAGE_SHIFT);
	return -ENOMEM;
}

long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	long err = -EBUSY;
	long order;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done) {
		kvm->arch.rma_setup_done = 0;
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
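		/*
		 * The full barrier orders the clearing of rma_setup_done
		 * before the read of vcpus_running; it pairs with the vcpu
		 * entry path, which bumps vcpus_running before checking
		 * rma_setup_done.
		 */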
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt_virt) {
		order = kvm->arch.hpt_order;
		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
		/*
		 * Reset all the reverse-mapping chains for all memslots
		 */
		kvmppc_rmap_reset(kvm);
		/* Ensure that each vcpu will flush its TLB on next entry. */
		cpumask_setall(&kvm->arch.need_tlb_flush);
		*htab_orderp = order;
		err = 0;
	} else {
		err = kvmppc_alloc_hpt(kvm, htab_orderp);
		order = *htab_orderp;
	}
 out:
	mutex_unlock(&kvm->lock);
	return err;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_cma_alloc)
		kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt),
				1 << (kvm->arch.hpt_order - PAGE_SHIFT));
	else
		free_pages(kvm->arch.hpt_virt,
			   kvm->arch.hpt_order - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
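	/* 0x1000 is the 64k large-page encoding in the low RPN bits */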
	return (pgsize == 0x10000) ? 0x1000 : 0;
}

void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	unsigned long idx_ret;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvm->arch.hpt_mask + 1)
		npages = kvm->arch.hpt_mask + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
						 &idx_ret);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	unsigned long msr = vcpu->arch.intr_msr;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		msr |= MSR_TS_S;
	else
		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
	kvmppc_set_msr(vcpu, msr);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the memslot->arch.slot_phys[] array.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = memslot->arch.slot_phys;
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				get_page(hpage);
				put_page(page);
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
	spin_lock(&kvm->arch.slot_phys_lock);
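	/*
	 * Each slot_phys[] entry packs the page's physical address
	 * together with low-order flag bits: KVMPPC_GOT_PAGE if we hold
	 * a page reference, the cache attribute (is_io) bits, and the
	 * log2 page order of the underlying allocation in system pages.
	 */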
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got)
		put_page(page);
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret)
{
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
				current->mm->pgd, false, pte_idx_ret);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;

}

/*
 * We come here on an H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			     long pte_index, unsigned long pteh,
			     unsigned long ptel)
{
	return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
					  pteh, ptel, &vcpu->arch.gpr[4]);
}

static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	preempt_disable();
	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0) {
		preempt_enable();
		return -ENOENT;
	}
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;
	preempt_enable();

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

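	/*
	 * In D-form loads/stores, primary-opcode bit 0x10000000
	 * distinguishes stores from the corresponding loads (e.g. stw
	 * vs. lwz); X-form instructions under primary opcode 31 use
	 * bit 0x100 of the extended opcode instead.
	 */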
	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/* We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same that caused the fault in the first
	 * place.  If the instruction we read is neither a load nor a store,
	 * then it can't access memory, so we don't need to worry about
	 * enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa_base, gfn_base;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gpa_base = r & HPTE_R_RPN & ~(psize - 1);
	gfn_base = gpa_base >> PAGE_SHIFT;
	gpa = gpa_base | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/*
	 * This should never happen, because of the slot_is_aligned()
	 * check in kvmppc_do_h_enter().
	 */
	if (gfn_base < memslot->base_gfn)
		return -EFAULT;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();
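	/*
	 * The read barrier pairs with the bump of mmu_notifier_seq in
	 * the invalidation path; mmu_notifier_retry() below rechecks
	 * the count once we hold the rmap chain lock.
	 */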

	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		pfn = page_to_pfn(page);
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			unsigned int hugepage_shift;
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, &hugepage_shift);
			if (ptep) {
				pte = kvmppc_read_update_linux_pte(ptep, 1,
							   hugepage_shift);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
	}

	ret = -EFAULT;
	if (psize > pte_size)
		goto out_put;

	/* Check WIMG vs. the actual page we're accessing */
	if (!hpte_cache_flags_ok(r, is_io)) {
		if (is_io)
			return -EFAULT;
		/*
		 * Allow guest to map emulated device memory as
		 * uncacheable, but actually make it cacheable.
		 */
		r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
	}

	/*
	 * Set the HPTE to point to pfn.
	 * Since the pfn is at PAGE_SIZE granularity, make sure we
	 * don't mask out lower-order bits if psize < PAGE_SIZE.
	 */
	if (psize < PAGE_SIZE)
		psize = PAGE_SIZE;
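	/*
	 * HPTE_R_PP0 - psize yields a mask covering the RPN field from
	 * the page-size boundary up to (but not including) the top PP
	 * bit, so the next line keeps r's low-order permission/WIMG
	 * (and large-page encoding) bits plus PP0 and splices in the
	 * real page number derived from pfn.
	 */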
	r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
	if (hpte_is_writable(r) && !write_ok)
		r = hpte_make_readonly(r);
	ret = RESUME_GUEST;
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
	    rev->guest_rpte != hpte[2])
		/* HPTE has been changed under us; let the guest retry */
		goto out_unlock;
	hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;

	/* Always put the HPTE in the rmap chain for the page base address */
	rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
	lock_rmap(rmap);

	/* Check if we might have been invalidated; let the guest retry if so */
	ret = RESUME_GUEST;
	if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
		unlock_rmap(rmap);
		goto out_unlock;
	}

	/* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
	rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
	r &= rcbits | ~(HPTE_R_R | HPTE_R_C);

	if (hptep[0] & HPTE_V_VALID) {
		/* HPTE was previously valid, so we need to invalidate it */
		unlock_rmap(rmap);
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, index);
		/* don't lose previous R and C bits */
		r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
	} else {
		kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
	}

	hptep[1] = r;
	eieio();
	hptep[0] = hpte[0];
	asm volatile("ptesync" : : : "memory");
	preempt_enable();
	if (page && hpte_is_writable(r))
		SetPageDirty(page);

 out_put:
	if (page) {
		/*
		 * We drop pages[0] here, not page because page might
		 * have been set to the head page of a compound, but
		 * we have to drop the reference on the correct tail
		 * page to match the get inside gup()
		 */
		put_page(pages[0]);
	}
	return ret;

 out_unlock:
	hptep[0] &= ~HPTE_V_HVLOCK;
	preempt_enable();
	goto out_put;
}

static void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm->memslots;
	kvm_for_each_memslot(memslot, slots) {
		/*
		 * This assumes it is acceptable to lose reference and
		 * change bits across a reset.
		 */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				int (*handler)(struct kvm *kvm,
					       unsigned long *rmapp,
					       unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gfn_t gfn_offset = gfn - memslot->base_gfn;

			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}

static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
| 878 | i = *rmapp & KVMPPC_RMAP_INDEX; |
Paul Mackerras | bad3b50 | 2011-12-15 02:02:02 +0000 | [diff] [blame] | 879 | hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); |
| 880 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
| 881 | /* unlock rmap before spinning on the HPTE lock */ |
| 882 | unlock_rmap(rmapp); |
| 883 | while (hptep[0] & HPTE_V_HVLOCK) |
| 884 | cpu_relax(); |
| 885 | continue; |
| 886 | } |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 887 | j = rev[i].forw; |
| 888 | if (j == i) { |
| 889 | /* chain is now empty */ |
Paul Mackerras | bad3b50 | 2011-12-15 02:02:02 +0000 | [diff] [blame] | 890 | *rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX); |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 891 | } else { |
| 892 | /* remove i from chain */ |
| 893 | h = rev[i].back; |
| 894 | rev[h].forw = j; |
| 895 | rev[j].back = h; |
| 896 | rev[i].forw = rev[i].back = i; |
Paul Mackerras | bad3b50 | 2011-12-15 02:02:02 +0000 | [diff] [blame] | 897 | *rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j; |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 898 | } |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 899 | |
Paul Mackerras | bad3b50 | 2011-12-15 02:02:02 +0000 | [diff] [blame] | 900 | /* Now check and modify the HPTE */ |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 901 | ptel = rev[i].guest_rpte; |
| 902 | psize = hpte_page_size(hptep[0], ptel); |
| 903 | if ((hptep[0] & HPTE_V_VALID) && |
| 904 | hpte_rpn(ptel, psize) == gfn) { |
Paul Mackerras | dfe49db | 2012-09-11 13:28:18 +0000 | [diff] [blame] | 905 | if (kvm->arch.using_mmu_notifiers) |
| 906 | hptep[0] |= HPTE_V_ABSENT; |
Paul Mackerras | bad3b50 | 2011-12-15 02:02:02 +0000 | [diff] [blame] | 907 | kvmppc_invalidate_hpte(kvm, hptep, i); |
| 908 | /* Harvest R and C */ |
| 909 | rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C); |
| 910 | *rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT; |
Paul Mackerras | a1b4a0f | 2013-04-18 19:50:24 +0000 | [diff] [blame] | 911 | if (rcbits & ~rev[i].guest_rpte) { |
| 912 | rev[i].guest_rpte = ptel | rcbits; |
| 913 | note_hpte_modification(kvm, &rev[i]); |
| 914 | } |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 915 | } |
Paul Mackerras | bad3b50 | 2011-12-15 02:02:02 +0000 | [diff] [blame] | 916 | unlock_rmap(rmapp); |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 917 | hptep[0] &= ~HPTE_V_HVLOCK; |
| 918 | } |
| 919 | return 0; |
| 920 | } |
| 921 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 922 | int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva) |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 923 | { |
| 924 | if (kvm->arch.using_mmu_notifiers) |
| 925 | kvm_handle_hva(kvm, hva, kvm_unmap_rmapp); |
| 926 | return 0; |
| 927 | } |
| 928 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 929 | int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end) |
Takuya Yoshikawa | b3ae209 | 2012-07-02 17:56:33 +0900 | [diff] [blame] | 930 | { |
| 931 | if (kvm->arch.using_mmu_notifiers) |
| 932 | kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp); |
| 933 | return 0; |
| 934 | } |
| 935 | |
Aneesh Kumar K.V | 3a167bea | 2013-10-07 22:17:53 +0530 | [diff] [blame] | 936 | void kvmppc_core_flush_memslot_hv(struct kvm *kvm, |
| 937 | struct kvm_memory_slot *memslot) |
Paul Mackerras | dfe49db | 2012-09-11 13:28:18 +0000 | [diff] [blame] | 938 | { |
| 939 | unsigned long *rmapp; |
| 940 | unsigned long gfn; |
| 941 | unsigned long n; |
| 942 | |
| 943 | rmapp = memslot->arch.rmap; |
| 944 | gfn = memslot->base_gfn; |
| 945 | for (n = memslot->npages; n; --n) { |
| 946 | /* |
| 947 | * Testing the present bit without locking is OK because |
| 948 | * the memslot has been marked invalid already, and hence |
| 949 | * no new HPTEs referencing this page can be created, |
| 950 | * thus the present bit can't go from 0 to 1. |
| 951 | */ |
| 952 | if (*rmapp & KVMPPC_RMAP_PRESENT) |
| 953 | kvm_unmap_rmapp(kvm, rmapp, gfn); |
| 954 | ++rmapp; |
| 955 | ++gfn; |
| 956 | } |
| 957 | } |
| 958 | |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 959 | static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp, |
| 960 | unsigned long gfn) |
| 961 | { |
Paul Mackerras | 5551489 | 2011-12-15 02:02:47 +0000 | [diff] [blame] | 962 | struct revmap_entry *rev = kvm->arch.revmap; |
| 963 | unsigned long head, i, j; |
| 964 | unsigned long *hptep; |
| 965 | int ret = 0; |
| 966 | |
| 967 | retry: |
| 968 | lock_rmap(rmapp); |
| 969 | if (*rmapp & KVMPPC_RMAP_REFERENCED) { |
| 970 | *rmapp &= ~KVMPPC_RMAP_REFERENCED; |
| 971 | ret = 1; |
| 972 | } |
| 973 | if (!(*rmapp & KVMPPC_RMAP_PRESENT)) { |
| 974 | unlock_rmap(rmapp); |
| 975 | return ret; |
| 976 | } |
| 977 | |
| 978 | i = head = *rmapp & KVMPPC_RMAP_INDEX; |
| 979 | do { |
| 980 | hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4)); |
| 981 | j = rev[i].forw; |
| 982 | |
| 983 | /* If this HPTE isn't referenced, ignore it */ |
| 984 | if (!(hptep[1] & HPTE_R_R)) |
| 985 | continue; |
| 986 | |
| 987 | if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) { |
| 988 | /* unlock rmap before spinning on the HPTE lock */ |
| 989 | unlock_rmap(rmapp); |
| 990 | while (hptep[0] & HPTE_V_HVLOCK) |
| 991 | cpu_relax(); |
| 992 | goto retry; |
| 993 | } |
| 994 | |
| 995 | /* Now check and modify the HPTE */ |
| 996 | if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) { |
| 997 | kvmppc_clear_ref_hpte(kvm, hptep, i); |
Paul Mackerras | a1b4a0f | 2013-04-18 19:50:24 +0000 | [diff] [blame] | 998 | if (!(rev[i].guest_rpte & HPTE_R_R)) { |
| 999 | rev[i].guest_rpte |= HPTE_R_R; |
| 1000 | note_hpte_modification(kvm, &rev[i]); |
| 1001 | } |
Paul Mackerras | 5551489 | 2011-12-15 02:02:47 +0000 | [diff] [blame] | 1002 | ret = 1; |
| 1003 | } |
| 1004 | hptep[0] &= ~HPTE_V_HVLOCK; |
| 1005 | } while ((i = j) != head); |
| 1006 | |
| 1007 | unlock_rmap(rmapp); |
| 1008 | return ret; |
Paul Mackerras | 342d3db | 2011-12-12 12:38:05 +0000 | [diff] [blame] | 1009 | } |

int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int vcpus_running(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.vcpus_running) != 0;
}

/*
 * Returns the number of system pages that are dirty.
 * This can be more than 1 if we find a huge-page HPTE.
 */
static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long n;
	unsigned long v, r;
	unsigned long *hptep;
	int npages_dirty = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		npages_dirty = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return npages_dirty;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/*
		 * Checking the C (changed) bit here is racy since there
		 * is no guarantee about when the hardware writes it back.
		 * If the HPTE is not writable then it is stable since the
		 * page can't be written to, and we would have done a tlbie
		 * (which forces the hardware to complete any writeback)
		 * when making the HPTE read-only.
		 * If vcpus are running then this call is racy anyway
		 * since the page could get dirtied subsequently, so we
		 * expect there to be a further call which would pick up
		 * any delayed C bit writeback.
		 * Otherwise we need to do the tlbie even if C==0 in
		 * order to pick up any delayed writeback of C.
		 */
		if (!(hptep[1] & HPTE_R_C) &&
		    (!hpte_is_writable(hptep[1]) || vcpus_running(kvm)))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if (!(hptep[0] & HPTE_V_VALID)) {
			/* unlock the HPTE before moving on */
			hptep[0] &= ~HPTE_V_HVLOCK;
			continue;
		}

		/* need to make it temporarily absent so C is stable */
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, i);
		v = hptep[0];
		r = hptep[1];
		if (r & HPTE_R_C) {
			hptep[1] = r & ~HPTE_R_C;
			if (!(rev[i].guest_rpte & HPTE_R_C)) {
				rev[i].guest_rpte |= HPTE_R_C;
				note_hpte_modification(kvm, &rev[i]);
			}
			n = hpte_page_size(v, r);
			n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
			if (n > npages_dirty)
				npages_dirty = n;
			eieio();
		}
		v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
		v |= HPTE_V_VALID;
		hptep[0] = v;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return npages_dirty;
}
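
/*
 * Worked example for the huge-page case above: with 4kB system pages,
 * a single dirty 16MB HPTE gives hpte_page_size(v, r) == 16MB, so
 * npages_dirty becomes 16MB >> PAGE_SHIFT == 4096, and the caller
 * then sets 4096 consecutive bits in the dirty bitmap.
 */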

static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			      struct kvm_memory_slot *memslot,
			      unsigned long *map)
{
	unsigned long gfn;

	if (!vpa->dirty || !vpa->pinned_addr)
		return;
	gfn = vpa->gpa >> PAGE_SHIFT;
	if (gfn < memslot->base_gfn ||
	    gfn >= memslot->base_gfn + memslot->npages)
		return;

	vpa->dirty = false;
	if (map)
		__set_bit_le(gfn - memslot->base_gfn, map);
}

long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     unsigned long *map)
{
	unsigned long i, j;
	unsigned long *rmapp;
	struct kvm_vcpu *vcpu;

	preempt_disable();
	rmapp = memslot->arch.rmap;
	for (i = 0; i < memslot->npages; ++i) {
		int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since we always put huge-page HPTEs in the rmap chain
		 * corresponding to their page base address.
		 */
		if (npages && map)
			for (j = i; npages; ++j, --npages)
				__set_bit_le(j, map);
		++rmapp;
	}

	/* Harvest dirty bits from VPA and DTL updates */
	/* Note: we never modify the SLB shadow buffer areas */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		spin_lock(&vcpu->arch.vpa_update_lock);
		harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
		harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
		spin_unlock(&vcpu->arch.vpa_update_lock);
	}
	preempt_enable();
	return 0;
}
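
/*
 * Userspace reaches kvmppc_hv_get_dirty_log() through the generic
 * KVM_GET_DIRTY_LOG ioctl.  A hypothetical caller (sketch only;
 * 'vmfd', 'slot_id' and 'bitmap' are placeholders) would do:
 *
 *	struct kvm_dirty_log log = { .slot = slot_id };
 *	log.dirty_bitmap = bitmap;	(one bit per system page)
 *	if (ioctl(vmfd, KVM_GET_DIRTY_LOG, &log) < 0)
 *		handle_error();
 */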

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, offset;
	unsigned long pa;
	unsigned long *physp;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	if (!kvm->arch.using_mmu_notifiers) {
		/* no MMU notifiers: look the page up in slot_phys */
		physp = memslot->arch.slot_phys;
		if (!physp)
			goto err;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				goto err;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
		get_page(page);
	} else {
		/* with MMU notifiers, pin the page with get_user_pages */
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			goto err;
		page = pages[0];
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	offset = gpa & (PAGE_SIZE - 1);
	if (nb_ret)
		*nb_ret = PAGE_SIZE - offset;
	return page_address(page) + offset;

 err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
			     bool dirty)
{
	struct page *page = virt_to_page(va);
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long *rmap;
	int srcu_idx;

	put_page(page);

	if (!dirty || !kvm->arch.using_mmu_notifiers)
		return;

	/* We need to mark this page dirty in the rmap chain */
	gfn = gpa >> PAGE_SHIFT;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot) {
		rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
		lock_rmap(rmap);
		*rmap |= KVMPPC_RMAP_CHANGED;
		unlock_rmap(rmap);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}
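
/*
 * These two functions are used as a pair.  A typical in-kernel caller
 * (sketch only; error handling elided) looks like:
 *
 *	unsigned long nb;
 *	void *va = kvmppc_pin_guest_page(kvm, gpa, &nb);
 *
 *	if (va) {
 *		... read or write up to nb bytes at va ...
 *		kvmppc_unpin_guest_page(kvm, va, gpa, wrote_to_page);
 *	}
 */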

/*
 * Functions for reading and writing the hash table via reads and
 * writes on a file descriptor.
 *
 * Reads return the guest view of the hash table, which has to be
 * pieced together from the real hash table and the guest_rpte
 * values in the revmap array.
 *
 * On writes, each HPTE written is considered in turn, and if it
 * is valid, it is written to the HPT as if an H_ENTER with the
 * exact flag set was done.  When the invalid count in a header
 * written to the stream is non-zero, the kernel ensures that many
 * HPTEs following the valid ones are invalid, invalidating them
 * if necessary.
 */
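
/*
 * A minimal save-side sketch from userspace (hypothetical code, not
 * part of this file; 'vmfd' and 'emit' are placeholders, the rest is
 * the KVM_PPC_GET_HTAB_FD ABI from <linux/kvm.h>):
 *
 *	struct kvm_get_htab_fd ghf = { .flags = 0, .start_index = 0 };
 *	int fd = ioctl(vmfd, KVM_PPC_GET_HTAB_FD, &ghf);
 *	char buf[65536];
 *	ssize_t n;
 *
 *	while ((n = read(fd, buf, sizeof(buf))) > 0)
 *		emit(buf, n);
 *
 * Each chunk read is a struct kvm_get_htab_header (index, n_valid,
 * n_invalid) followed by n_valid HPTE images of HPTE_SIZE bytes each;
 * the first pass returns all valid entries, and subsequent reads
 * return only the entries that have changed since they were last read.
 */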

struct kvm_htab_ctx {
	unsigned long index;
	unsigned long flags;
	struct kvm *kvm;
	int first_pass;
};

#define HPTE_SIZE (2 * sizeof(unsigned long))

/*
 * Returns 1 if this HPT entry has been modified or has pending
 * R/C bit changes.
 */
static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
{
	unsigned long rcbits_unset;

	if (revp->guest_rpte & HPTE_GR_MODIFIED)
		return 1;

	/* Also need to consider changes in reference and changed bits */
	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
	if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
		return 1;

	return 0;
}

static long record_hpte(unsigned long flags, unsigned long *hptp,
			unsigned long *hpte, struct revmap_entry *revp,
			int want_valid, int first_pass)
{
	unsigned long v, r;
	unsigned long rcbits_unset;
	int ok = 1;
	int valid, dirty;

	/* Unmodified entries are uninteresting except on the first pass */
	dirty = hpte_dirty(revp, hptp);
	if (!first_pass && !dirty)
		return 0;

	valid = 0;
	if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		valid = 1;
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
		    !(hptp[0] & HPTE_V_BOLTED))
			valid = 0;
	}
	if (valid != want_valid)
		return 0;

	v = r = 0;
	if (valid || dirty) {
		/* lock the HPTE so it's stable and read it */
		preempt_disable();
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = hptp[0];

		/* re-evaluate valid and dirty from synchronized HPTE value */
		valid = !!(v & HPTE_V_VALID);
		dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);

		/* Harvest R and C into guest view if necessary */
		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
		if (valid && (rcbits_unset & hptp[1])) {
			revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) |
				HPTE_GR_MODIFIED;
			dirty = 1;
		}

		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
			valid = 1;
		}
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
			valid = 0;

		r = revp->guest_rpte;
		/* only clear modified if this is the right sort of entry */
		if (valid == want_valid && dirty) {
			r &= ~HPTE_GR_MODIFIED;
			revp->guest_rpte = r;
		}
		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
		hptp[0] &= ~HPTE_V_HVLOCK;
		preempt_enable();
		if (!(valid == want_valid && (first_pass || dirty)))
			ok = 0;
	}
	hpte[0] = v;
	hpte[1] = r;
	return ok;
}

static ssize_t kvm_htab_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long *hptp;
	struct revmap_entry *revp;
	unsigned long i, nb, nw;
	unsigned long __user *lbuf;
	struct kvm_get_htab_header __user *hptr;
	unsigned long flags;
	int first_pass;
	unsigned long hpte[2];

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	first_pass = ctx->first_pass;
	flags = ctx->flags;

	i = ctx->index;
	hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
	revp = kvm->arch.revmap + i;
	lbuf = (unsigned long __user *)buf;

	nb = 0;
	while (nb + sizeof(hdr) + HPTE_SIZE < count) {
		/* Initialize header */
		hptr = (struct kvm_get_htab_header __user *)buf;
		hdr.n_valid = 0;
		hdr.n_invalid = 0;
		nw = nb;
		nb += sizeof(hdr);
		lbuf = (unsigned long __user *)(buf + sizeof(hdr));

		/*
		 * Skip uninteresting entries, i.e. entries that are
		 * clean when this is not the first pass.
		 */
		if (!first_pass) {
			while (i < kvm->arch.hpt_npte &&
			       !hpte_dirty(revp, hptp)) {
				++i;
				hptp += 2;
				++revp;
			}
		}
		hdr.index = i;

		/* Grab a series of valid entries */
		while (i < kvm->arch.hpt_npte &&
		       hdr.n_valid < 0xffff &&
		       nb + HPTE_SIZE < count &&
		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
			/* valid entry, write it out */
			++hdr.n_valid;
			if (__put_user(hpte[0], lbuf) ||
			    __put_user(hpte[1], lbuf + 1))
				return -EFAULT;
			nb += HPTE_SIZE;
			lbuf += 2;
			++i;
			hptp += 2;
			++revp;
		}
		/* Now skip invalid entries while we can */
		while (i < kvm->arch.hpt_npte &&
		       hdr.n_invalid < 0xffff &&
		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
			/* found an invalid entry */
			++hdr.n_invalid;
			++i;
			hptp += 2;
			++revp;
		}

		if (hdr.n_valid || hdr.n_invalid) {
			/* write back the header */
			if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
				return -EFAULT;
			nw = nb;
			buf = (char __user *)lbuf;
		} else {
			nb = nw;
		}

		/* Check if we've wrapped around the hash table */
		if (i >= kvm->arch.hpt_npte) {
			i = 0;
			ctx->first_pass = 0;
			break;
		}
	}

	ctx->index = i;

	return nb;
}

static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long i, j;
	unsigned long v, r;
	unsigned long __user *lbuf;
	unsigned long *hptp;
	unsigned long tmp[2];
	ssize_t nb;
	long int err, ret;
	int rma_setup;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/* lock out vcpus from running while we're doing this */
	mutex_lock(&kvm->lock);
	rma_setup = kvm->arch.rma_setup_done;
	if (rma_setup) {
		kvm->arch.rma_setup_done = 0;	/* temporarily */
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			mutex_unlock(&kvm->lock);
			return -EBUSY;
		}
	}

	err = 0;
	for (nb = 0; nb + sizeof(hdr) <= count; ) {
		err = -EFAULT;
		if (__copy_from_user(&hdr, buf, sizeof(hdr)))
			break;

		err = 0;
		if (nb + hdr.n_valid * HPTE_SIZE > count)
			break;

		nb += sizeof(hdr);
		buf += sizeof(hdr);

		err = -EINVAL;
		i = hdr.index;
		if (i >= kvm->arch.hpt_npte ||
		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
			break;

		hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
		lbuf = (unsigned long __user *)buf;
		for (j = 0; j < hdr.n_valid; ++j) {
			err = -EFAULT;
			if (__get_user(v, lbuf) || __get_user(r, lbuf + 1))
				goto out;
			err = -EINVAL;
			if (!(v & HPTE_V_VALID))
				goto out;
			lbuf += 2;
			nb += HPTE_SIZE;

			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			err = -EIO;
			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
							 tmp);
			if (ret != H_SUCCESS) {
				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
				       "r=%lx\n", ret, i, v, r);
				goto out;
			}
			if (!rma_setup && is_vrma_hpte(v)) {
				unsigned long psize = hpte_page_size(v, r);
				unsigned long senc = slb_pgsize_encoding(psize);
				unsigned long lpcr;

				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
					(VRMA_VSID << SLB_VSID_SHIFT_1T);
				lpcr = senc << (LPCR_VRMASD_SH - 4);
				kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
				rma_setup = 1;
			}
			++i;
			hptp += 2;
		}

		for (j = 0; j < hdr.n_invalid; ++j) {
			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			++i;
			hptp += 2;
		}
		err = 0;
	}

 out:
	/* Order HPTE updates vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = rma_setup;
	mutex_unlock(&kvm->lock);

	if (err)
		return err;
	return nb;
}
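
/*
 * The matching restore-side sketch (hypothetical userspace code;
 * 'vmfd', 'buf' and 'len' stand for the stream captured on the
 * source host):
 *
 *	struct kvm_get_htab_fd ghf = { .flags = KVM_GET_HTAB_WRITE };
 *	int fd = ioctl(vmfd, KVM_PPC_GET_HTAB_FD, &ghf);
 *
 *	if (write(fd, buf, len) != len)
 *		handle_error();
 */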

static int kvm_htab_release(struct inode *inode, struct file *filp)
{
	struct kvm_htab_ctx *ctx = filp->private_data;

	filp->private_data = NULL;
	if (!(ctx->flags & KVM_GET_HTAB_WRITE))
		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
	kvm_put_kvm(ctx->kvm);
	kfree(ctx);
	return 0;
}

static const struct file_operations kvm_htab_fops = {
	.read = kvm_htab_read,
	.write = kvm_htab_write,
	.llseek = default_llseek,
	.release = kvm_htab_release,
};

int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
{
	int ret;
	struct kvm_htab_ctx *ctx;
	int rwflag;

	/* reject flags we don't recognize */
	if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
		return -EINVAL;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	kvm_get_kvm(kvm);
	ctx->kvm = kvm;
	ctx->index = ghf->start_index;
	ctx->flags = ghf->flags;
	ctx->first_pass = 1;

	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
	if (ret < 0) {
		kvm_put_kvm(kvm);
		return ret;
	}

	if (rwflag == O_RDONLY) {
		mutex_lock(&kvm->slots_lock);
		atomic_inc(&kvm->arch.hpte_mod_interest);
		/* make sure kvmppc_do_h_enter etc. see the increment */
		synchronize_srcu_expedited(&kvm->srcu);
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}

void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;		/* PPC970 */

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}