/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright 2010 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/highmem.h>
#include <linux/gfp.h>
#include <linux/slab.h>
#include <linux/hugetlb.h>
#include <linux/vmalloc.h>
#include <linux/srcu.h>
#include <linux/anon_inodes.h>
#include <linux/file.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>
#include <asm/cputable.h>

#include "book3s_hv_cma.h"

/* POWER7 has 10-bit LPIDs, PPC970 has 6-bit LPIDs */
#define MAX_LPID_970	63

/* The Power architecture requires that the HPT be at least 256kB */
#define PPC_MIN_HPT_ORDER	18

static long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
			long pte_index, unsigned long pteh,
			unsigned long ptel, unsigned long *pte_idx_ret);
static void kvmppc_rmap_reset(struct kvm *kvm);

long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	unsigned long hpt = 0;
	struct revmap_entry *rev;
	struct page *page = NULL;
	long order = KVM_DEFAULT_HPT_ORDER;

	if (htab_orderp) {
		order = *htab_orderp;
		if (order < PPC_MIN_HPT_ORDER)
			order = PPC_MIN_HPT_ORDER;
	}

	kvm->arch.hpt_cma_alloc = 0;
	VM_BUG_ON(order < KVM_CMA_CHUNK_ORDER);
	page = kvm_alloc_hpt(1 << (order - PAGE_SHIFT));
	if (page) {
		hpt = (unsigned long)pfn_to_kaddr(page_to_pfn(page));
		kvm->arch.hpt_cma_alloc = 1;
	}

	/* Lastly try successively smaller sizes from the page allocator */
	while (!hpt && order > PPC_MIN_HPT_ORDER) {
		hpt = __get_free_pages(GFP_KERNEL|__GFP_ZERO|__GFP_REPEAT|
				       __GFP_NOWARN, order - PAGE_SHIFT);
		if (!hpt)
			--order;
	}

	if (!hpt)
		return -ENOMEM;

	kvm->arch.hpt_virt = hpt;
	kvm->arch.hpt_order = order;
	/* HPTEs are 2**4 bytes long */
	kvm->arch.hpt_npte = 1ul << (order - 4);
	/* 128 (2**7) bytes in each HPTEG */
	kvm->arch.hpt_mask = (1ul << (order - 7)) - 1;

	/* Allocate reverse map array */
	rev = vmalloc(sizeof(struct revmap_entry) * kvm->arch.hpt_npte);
	if (!rev) {
		pr_err("kvmppc_alloc_hpt: Couldn't alloc reverse map array\n");
		goto out_freehpt;
	}
	kvm->arch.revmap = rev;
	kvm->arch.sdr1 = __pa(hpt) | (order - 18);

	pr_info("KVM guest htab at %lx (order %ld), LPID %x\n",
		hpt, order, kvm->arch.lpid);

	if (htab_orderp)
		*htab_orderp = order;
	return 0;

 out_freehpt:
	if (kvm->arch.hpt_cma_alloc)
		kvm_release_hpt(page, 1 << (order - PAGE_SHIFT));
	else
		free_pages(hpt, order - PAGE_SHIFT);
	return -ENOMEM;
}

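/*
 * Worked example (illustrative, not part of the original source):
 * assuming KVM_DEFAULT_HPT_ORDER is 24, the default HPT is 2^24 =
 * 16MB and holds 2^(24-4) = 1M HPTEs, arranged as 2^(24-7) = 128k
 * groups (HPTEGs) of 8 entries each, so hpt_mask = 0x1ffff is the
 * mask ANDed with hash values to select a HPTEG.  The low bits of
 * SDR1 carry order - 18, matching the architected HTABSIZE encoding
 * of log2(table size) relative to the 256kB minimum.
 */
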
long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp)
{
	long err = -EBUSY;
	long order;

	mutex_lock(&kvm->lock);
	if (kvm->arch.rma_setup_done) {
		kvm->arch.rma_setup_done = 0;
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			goto out;
		}
	}
	if (kvm->arch.hpt_virt) {
		order = kvm->arch.hpt_order;
		/* Set the entire HPT to 0, i.e. invalid HPTEs */
		memset((void *)kvm->arch.hpt_virt, 0, 1ul << order);
		/*
		 * Reset all the reverse-mapping chains for all memslots
		 */
		kvmppc_rmap_reset(kvm);
		/* Ensure that each vcpu will flush its TLB on next entry. */
		cpumask_setall(&kvm->arch.need_tlb_flush);
		*htab_orderp = order;
		err = 0;
	} else {
		err = kvmppc_alloc_hpt(kvm, htab_orderp);
		order = *htab_orderp;
	}
 out:
	mutex_unlock(&kvm->lock);
	return err;
}

void kvmppc_free_hpt(struct kvm *kvm)
{
	kvmppc_free_lpid(kvm->arch.lpid);
	vfree(kvm->arch.revmap);
	if (kvm->arch.hpt_cma_alloc)
		kvm_release_hpt(virt_to_page(kvm->arch.hpt_virt),
				1 << (kvm->arch.hpt_order - PAGE_SHIFT));
	else
		free_pages(kvm->arch.hpt_virt,
			   kvm->arch.hpt_order - PAGE_SHIFT);
}

/* Bits in first HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte0_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize > 0x1000) ? HPTE_V_LARGE : 0;
}

/* Bits in second HPTE dword for pagesize 4k, 64k or 16M */
static inline unsigned long hpte1_pgsize_encoding(unsigned long pgsize)
{
	return (pgsize == 0x10000) ? 0x1000 : 0;
}

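/*
 * Illustrative summary of the two helpers above (not in the original):
 *
 *	pgsize	dword 0			dword 1
 *	4kB	0			0
 *	64kB	HPTE_V_LARGE		0x1000
 *	16MB	HPTE_V_LARGE		0
 *
 * i.e. the large-page flag lives in the first HPTE doubleword, while
 * the 64k-vs-16M distinction is encoded in low-order bits of the
 * second.
 */
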
void kvmppc_map_vrma(struct kvm_vcpu *vcpu, struct kvm_memory_slot *memslot,
		     unsigned long porder)
{
	unsigned long i;
	unsigned long npages;
	unsigned long hp_v, hp_r;
	unsigned long addr, hash;
	unsigned long psize;
	unsigned long hp0, hp1;
	unsigned long idx_ret;
	long ret;
	struct kvm *kvm = vcpu->kvm;

	psize = 1ul << porder;
	npages = memslot->npages >> (porder - PAGE_SHIFT);

	/* VRMA can't be > 1TB */
	if (npages > 1ul << (40 - porder))
		npages = 1ul << (40 - porder);
	/* Can't use more than 1 HPTE per HPTEG */
	if (npages > kvm->arch.hpt_mask + 1)
		npages = kvm->arch.hpt_mask + 1;

	hp0 = HPTE_V_1TB_SEG | (VRMA_VSID << (40 - 16)) |
		HPTE_V_BOLTED | hpte0_pgsize_encoding(psize);
	hp1 = hpte1_pgsize_encoding(psize) |
		HPTE_R_R | HPTE_R_C | HPTE_R_M | PP_RWXX;

	for (i = 0; i < npages; ++i) {
		addr = i << porder;
		/* can't use hpt_hash since va > 64 bits */
		hash = (i ^ (VRMA_VSID ^ (VRMA_VSID << 25))) & kvm->arch.hpt_mask;
		/*
		 * We assume that the hash table is empty and no
		 * vcpus are using it at this stage.  Since we create
		 * at most one HPTE per HPTEG, we just assume entry 7
		 * is available and use it.
		 */
		hash = (hash << 3) + 7;
		hp_v = hp0 | ((addr >> 16) & ~0x7fUL);
		hp_r = hp1 | addr;
		ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, hash, hp_v, hp_r,
						 &idx_ret);
		if (ret != H_SUCCESS) {
			pr_err("KVM: map_vrma at %lx failed, ret=%ld\n",
			       addr, ret);
			break;
		}
	}
}

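/*
 * Sketch of the indexing above (illustrative): a HPTEG holds 8 HPTEs,
 * so hash group h covers HPT entries h*8 .. h*8+7, and
 * (hash << 3) + 7 selects the last slot of group `hash`.  hp_v then
 * carries the abbreviated virtual page number, (addr >> 16) with the
 * low 7 bits masked off as the AVPN field requires.
 */
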
int kvmppc_mmu_hv_init(void)
{
	unsigned long host_lpid, rsvd_lpid;

	if (!cpu_has_feature(CPU_FTR_HVMODE))
		return -EINVAL;

	/* POWER7 has 10-bit LPIDs, PPC970 and e500mc have 6-bit LPIDs */
	if (cpu_has_feature(CPU_FTR_ARCH_206)) {
		host_lpid = mfspr(SPRN_LPID);	/* POWER7 */
		rsvd_lpid = LPID_RSVD;
	} else {
		host_lpid = 0;			/* PPC970 */
		rsvd_lpid = MAX_LPID_970;
	}

	kvmppc_init_lpid(rsvd_lpid + 1);

	kvmppc_claim_lpid(host_lpid);
	/* rsvd_lpid is reserved for use in partition switching */
	kvmppc_claim_lpid(rsvd_lpid);

	return 0;
}

static void kvmppc_mmu_book3s_64_hv_reset_msr(struct kvm_vcpu *vcpu)
{
	unsigned long msr = vcpu->arch.intr_msr;

	/* If transactional, change to suspend mode on IRQ delivery */
	if (MSR_TM_TRANSACTIONAL(vcpu->arch.shregs.msr))
		msr |= MSR_TS_S;
	else
		msr |= vcpu->arch.shregs.msr & MSR_TS_MASK;
	kvmppc_set_msr(vcpu, msr);
}

/*
 * This is called to get a reference to a guest page if there isn't
 * one already in the memslot->arch.slot_phys[] array.
 */
static long kvmppc_get_guest_page(struct kvm *kvm, unsigned long gfn,
				  struct kvm_memory_slot *memslot,
				  unsigned long psize)
{
	unsigned long start;
	long np, err;
	struct page *page, *hpage, *pages[1];
	unsigned long s, pgsize;
	unsigned long *physp;
	unsigned int is_io, got, pgorder;
	struct vm_area_struct *vma;
	unsigned long pfn, i, npages;

	physp = memslot->arch.slot_phys;
	if (!physp)
		return -EINVAL;
	if (physp[gfn - memslot->base_gfn])
		return 0;

	is_io = 0;
	got = 0;
	page = NULL;
	pgsize = psize;
	err = -EINVAL;
	start = gfn_to_hva_memslot(memslot, gfn);

	/* Instantiate and get the page we want access to */
	np = get_user_pages_fast(start, 1, 1, pages);
	if (np != 1) {
		/* Look up the vma for the page */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, start);
		if (!vma || vma->vm_start > start ||
		    start + psize > vma->vm_end ||
		    !(vma->vm_flags & VM_PFNMAP))
			goto up_err;
		is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
		pfn = vma->vm_pgoff + ((start - vma->vm_start) >> PAGE_SHIFT);
		/* check alignment of pfn vs. requested page size */
		if (psize > PAGE_SIZE && (pfn & ((psize >> PAGE_SHIFT) - 1)))
			goto up_err;
		up_read(&current->mm->mmap_sem);

	} else {
		page = pages[0];
		got = KVMPPC_GOT_PAGE;

		/* See if this is a large page */
		s = PAGE_SIZE;
		if (PageHuge(page)) {
			hpage = compound_head(page);
			s <<= compound_order(hpage);
			/* Get the whole large page if slot alignment is ok */
			if (s > psize && slot_is_aligned(memslot, s) &&
			    !(memslot->userspace_addr & (s - 1))) {
				start &= ~(s - 1);
				pgsize = s;
				get_page(hpage);
				put_page(page);
				page = hpage;
			}
		}
		if (s < psize)
			goto out;
		pfn = page_to_pfn(page);
	}

	npages = pgsize >> PAGE_SHIFT;
	pgorder = __ilog2(npages);
	physp += (gfn - memslot->base_gfn) & ~(npages - 1);
	spin_lock(&kvm->arch.slot_phys_lock);
	for (i = 0; i < npages; ++i) {
		if (!physp[i]) {
			physp[i] = ((pfn + i) << PAGE_SHIFT) +
				got + is_io + pgorder;
			got = 0;
		}
	}
	spin_unlock(&kvm->arch.slot_phys_lock);
	err = 0;

 out:
	if (got)
		put_page(page);
	return err;

 up_err:
	up_read(&current->mm->mmap_sem);
	return err;
}

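/*
 * Layout note (illustrative, inferred from the code above): each
 * slot_phys[] entry written by kvmppc_get_guest_page() packs the
 * page's physical address with bookkeeping in its low-order bits:
 * the page order (pgorder), the cacheability bits returned by
 * hpte_cache_bits() (is_io), and KVMPPC_GOT_PAGE on the one entry
 * that owns the gup() reference for the page.
 */
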
long kvmppc_virtmode_do_h_enter(struct kvm *kvm, unsigned long flags,
				long pte_index, unsigned long pteh,
				unsigned long ptel, unsigned long *pte_idx_ret)
{
	unsigned long psize, gpa, gfn;
	struct kvm_memory_slot *memslot;
	long ret;

	if (kvm->arch.using_mmu_notifiers)
		goto do_insert;

	psize = hpte_page_size(pteh, ptel);
	if (!psize)
		return H_PARAMETER;

	pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

	/* Find the memslot (if any) for this address */
	gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)) {
		if (!slot_is_aligned(memslot, psize))
			return H_PARAMETER;
		if (kvmppc_get_guest_page(kvm, gfn, memslot, psize) < 0)
			return H_PARAMETER;
	}

 do_insert:
	/* Protect linux PTE lookup from page table destruction */
	rcu_read_lock_sched();	/* this disables preemption too */
	ret = kvmppc_do_h_enter(kvm, flags, pte_index, pteh, ptel,
				current->mm->pgd, false, pte_idx_ret);
	rcu_read_unlock_sched();
	if (ret == H_TOO_HARD) {
		/* this can't happen */
		pr_err("KVM: Oops, kvmppc_h_enter returned too hard!\n");
		ret = H_RESOURCE;	/* or something */
	}
	return ret;
}

/*
 * We come here on a H_ENTER call from the guest when we are not
 * using mmu notifiers and we don't have the requested page pinned
 * already.
 */
long kvmppc_virtmode_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
			     long pte_index, unsigned long pteh,
			     unsigned long ptel)
{
	return kvmppc_virtmode_do_h_enter(vcpu->kvm, flags, pte_index,
					  pteh, ptel, &vcpu->arch.gpr[4]);
}

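/*
 * Illustrative call path (an assumption, not spelled out in this
 * file): a guest H_ENTER is normally handled in real mode by
 * kvmppc_do_h_enter(); the virtual-mode wrappers above are used on
 * the !using_mmu_notifiers path so that the backing page can first
 * be pinned via kvmppc_get_guest_page(), with the chosen PTE index
 * returned to the guest in GPR4.
 */
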
static struct kvmppc_slb *kvmppc_mmu_book3s_hv_find_slbe(struct kvm_vcpu *vcpu,
							 gva_t eaddr)
{
	u64 mask;
	int i;

	for (i = 0; i < vcpu->arch.slb_nr; i++) {
		if (!(vcpu->arch.slb[i].orige & SLB_ESID_V))
			continue;

		if (vcpu->arch.slb[i].origv & SLB_VSID_B_1T)
			mask = ESID_MASK_1T;
		else
			mask = ESID_MASK;

		if (((vcpu->arch.slb[i].orige ^ eaddr) & mask) == 0)
			return &vcpu->arch.slb[i];
	}
	return NULL;
}

static unsigned long kvmppc_mmu_get_real_addr(unsigned long v, unsigned long r,
			unsigned long ea)
{
	unsigned long ra_mask;

	ra_mask = hpte_page_size(v, r) - 1;
	return (r & HPTE_R_RPN & ~ra_mask) | (ea & ra_mask);
}

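/*
 * Worked example for kvmppc_mmu_get_real_addr() above (illustrative):
 * for a 64kB page, hpte_page_size() returns 0x10000, so
 * ra_mask = 0xffff and the real address is the 64kB-aligned RPN from
 * the second HPTE doubleword with the low 16 bits taken from the
 * effective address.
 */
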
static int kvmppc_mmu_book3s_64_hv_xlate(struct kvm_vcpu *vcpu, gva_t eaddr,
			struct kvmppc_pte *gpte, bool data, bool iswrite)
{
	struct kvm *kvm = vcpu->kvm;
	struct kvmppc_slb *slbe;
	unsigned long slb_v;
	unsigned long pp, key;
	unsigned long v, gr;
	unsigned long *hptep;
	int index;
	int virtmode = vcpu->arch.shregs.msr & (data ? MSR_DR : MSR_IR);

	/* Get SLB entry */
	if (virtmode) {
		slbe = kvmppc_mmu_book3s_hv_find_slbe(vcpu, eaddr);
		if (!slbe)
			return -EINVAL;
		slb_v = slbe->origv;
	} else {
		/* real mode access */
		slb_v = vcpu->kvm->arch.vrma_slb_v;
	}

	preempt_disable();
	/* Find the HPTE in the hash table */
	index = kvmppc_hv_find_lock_hpte(kvm, eaddr, slb_v,
					 HPTE_V_VALID | HPTE_V_ABSENT);
	if (index < 0) {
		preempt_enable();
		return -ENOENT;
	}
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	v = hptep[0] & ~HPTE_V_HVLOCK;
	gr = kvm->arch.revmap[index].guest_rpte;

	/* Unlock the HPTE */
	asm volatile("lwsync" : : : "memory");
	hptep[0] = v;
	preempt_enable();

	gpte->eaddr = eaddr;
	gpte->vpage = ((v & HPTE_V_AVPN) << 4) | ((eaddr >> 12) & 0xfff);

	/* Get PP bits and key for permission check */
	pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
	key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
	key &= slb_v;

	/* Calculate permissions */
	gpte->may_read = hpte_read_permission(pp, key);
	gpte->may_write = hpte_write_permission(pp, key);
	gpte->may_execute = gpte->may_read && !(gr & (HPTE_R_N | HPTE_R_G));

	/* Storage key permission check for POWER7 */
	if (data && virtmode && cpu_has_feature(CPU_FTR_ARCH_206)) {
		int amrfield = hpte_get_skey_perm(gr, vcpu->arch.amr);
		if (amrfield & 1)
			gpte->may_read = 0;
		if (amrfield & 2)
			gpte->may_write = 0;
	}

	/* Get the guest physical address */
	gpte->raddr = kvmppc_mmu_get_real_addr(v, gr, eaddr);
	return 0;
}

/*
 * Quick test for whether an instruction is a load or a store.
 * If the instruction is a load or a store, then this will indicate
 * which it is, at least on server processors.  (Embedded processors
 * have some external PID instructions that don't follow the rule
 * embodied here.)  If the instruction isn't a load or store, then
 * this doesn't return anything useful.
 */
static int instruction_is_store(unsigned int instr)
{
	unsigned int mask;

	mask = 0x10000000;
	if ((instr & 0xfc000000) == 0x7c000000)
		mask = 0x100;		/* major opcode 31 */
	return (instr & mask) != 0;
}

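/*
 * Worked example for instruction_is_store() above (illustrative):
 * lwz is major opcode 32, so its instruction word looks like
 * 0x80000000 | ... and instr & 0x10000000 == 0 (a load); stw is
 * major opcode 36, i.e. 0x90000000 | ..., so the bit is set (a
 * store).  For major opcode 31 (X-form loads/stores) the analogous
 * distinction is bit 0x100 in the extended opcode field, e.g.
 * lwzx (23) versus stwx (151).
 */
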
static int kvmppc_hv_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned long gpa, gva_t ea, int is_store)
{
	int ret;
	u32 last_inst;
	unsigned long srr0 = kvmppc_get_pc(vcpu);

	/* We try to load the last instruction.  We don't let
	 * emulate_instruction do it as it doesn't check what
	 * kvmppc_ld returns.
	 * If we fail, we just return to the guest and try executing it again.
	 */
	if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED) {
		ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
		if (ret != EMULATE_DONE || last_inst == KVM_INST_FETCH_FAILED)
			return RESUME_GUEST;
		vcpu->arch.last_inst = last_inst;
	}

	/*
	 * WARNING: We do not know for sure whether the instruction we just
	 * read from memory is the same one that caused the fault in the
	 * first place.  If the instruction we read is neither a load nor a
	 * store, then it can't access memory, so we don't need to worry
	 * about enforcing access permissions.  So, assuming it is a load or
	 * store, we just check that its direction (load or store) is
	 * consistent with the original fault, since that's what we
	 * checked the access permissions against.  If there is a mismatch
	 * we just return and retry the instruction.
	 */

	if (instruction_is_store(kvmppc_get_last_inst(vcpu)) != !!is_store)
		return RESUME_GUEST;

	/*
	 * Emulated accesses are emulated by looking at the hash for
	 * translation once, then performing the access later.  The
	 * translation could be invalidated in the meantime, at which
	 * point performing the subsequent memory access on the old
	 * physical address could possibly be a security hole for the
	 * guest (but not the host).
	 *
	 * This is less of an issue for MMIO stores since they aren't
	 * globally visible.  It could be an issue for MMIO loads to
	 * a certain extent but we'll ignore it for now.
	 */

	vcpu->arch.paddr_accessed = gpa;
	vcpu->arch.vaddr_accessed = ea;
	return kvmppc_emulate_mmio(run, vcpu);
}

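/*
 * Usage note (illustrative assumption): on a fault in an emulated
 * MMIO region, kvmppc_book3s_hv_page_fault() below calls this with
 * is_store taken from DSISR_ISSTORE; kvmppc_emulate_mmio() then
 * typically exits to userspace (e.g. QEMU) with the gpa/ea recorded
 * in vcpu->arch so the access can be completed there.
 */
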
int kvmppc_book3s_hv_page_fault(struct kvm_run *run, struct kvm_vcpu *vcpu,
				unsigned long ea, unsigned long dsisr)
{
	struct kvm *kvm = vcpu->kvm;
	unsigned long *hptep, hpte[3], r;
	unsigned long mmu_seq, psize, pte_size;
	unsigned long gpa_base, gfn_base;
	unsigned long gpa, gfn, hva, pfn;
	struct kvm_memory_slot *memslot;
	unsigned long *rmap;
	struct revmap_entry *rev;
	struct page *page, *pages[1];
	long index, ret, npages;
	unsigned long is_io;
	unsigned int writing, write_ok;
	struct vm_area_struct *vma;
	unsigned long rcbits;

	/*
	 * Real-mode code has already searched the HPT and found the
	 * entry we're interested in.  Lock the entry and check that
	 * it hasn't changed.  If it has, just return and re-execute the
	 * instruction.
	 */
	if (ea != vcpu->arch.pgfault_addr)
		return RESUME_GUEST;
	index = vcpu->arch.pgfault_index;
	hptep = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
	rev = &kvm->arch.revmap[index];
	preempt_disable();
	while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
		cpu_relax();
	hpte[0] = hptep[0] & ~HPTE_V_HVLOCK;
	hpte[1] = hptep[1];
	hpte[2] = r = rev->guest_rpte;
	asm volatile("lwsync" : : : "memory");
	hptep[0] = hpte[0];
	preempt_enable();

	if (hpte[0] != vcpu->arch.pgfault_hpte[0] ||
	    hpte[1] != vcpu->arch.pgfault_hpte[1])
		return RESUME_GUEST;

	/* Translate the logical address and get the page */
	psize = hpte_page_size(hpte[0], r);
	gpa_base = r & HPTE_R_RPN & ~(psize - 1);
	gfn_base = gpa_base >> PAGE_SHIFT;
	gpa = gpa_base | (ea & (psize - 1));
	gfn = gpa >> PAGE_SHIFT;
	memslot = gfn_to_memslot(kvm, gfn);

	/* No memslot means it's an emulated MMIO region */
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		return kvmppc_hv_emulate_mmio(run, vcpu, gpa, ea,
					      dsisr & DSISR_ISSTORE);

	if (!kvm->arch.using_mmu_notifiers)
		return -EFAULT;		/* should never get here */

	/*
	 * This should never happen, because of the slot_is_aligned()
	 * check in kvmppc_do_h_enter().
	 */
	if (gfn_base < memslot->base_gfn)
		return -EFAULT;

	/* used to check for invalidations in progress */
	mmu_seq = kvm->mmu_notifier_seq;
	smp_rmb();

	is_io = 0;
	pfn = 0;
	page = NULL;
	pte_size = PAGE_SIZE;
	writing = (dsisr & DSISR_ISSTORE) != 0;
	/* If writing != 0, then the HPTE must allow writing, if we get here */
	write_ok = writing;
	hva = gfn_to_hva_memslot(memslot, gfn);
	npages = get_user_pages_fast(hva, 1, writing, pages);
	if (npages < 1) {
		/* Check if it's an I/O mapping */
		down_read(&current->mm->mmap_sem);
		vma = find_vma(current->mm, hva);
		if (vma && vma->vm_start <= hva && hva + psize <= vma->vm_end &&
		    (vma->vm_flags & VM_PFNMAP)) {
			pfn = vma->vm_pgoff +
				((hva - vma->vm_start) >> PAGE_SHIFT);
			pte_size = psize;
			is_io = hpte_cache_bits(pgprot_val(vma->vm_page_prot));
			write_ok = vma->vm_flags & VM_WRITE;
		}
		up_read(&current->mm->mmap_sem);
		if (!pfn)
			return -EFAULT;
	} else {
		page = pages[0];
		pfn = page_to_pfn(page);
		if (PageHuge(page)) {
			page = compound_head(page);
			pte_size <<= compound_order(page);
		}
		/* if the guest wants write access, see if that is OK */
		if (!writing && hpte_is_writable(r)) {
			unsigned int hugepage_shift;
			pte_t *ptep, pte;

			/*
			 * We need to protect against page table destruction
			 * while looking up and updating the pte.
			 */
			rcu_read_lock_sched();
			ptep = find_linux_pte_or_hugepte(current->mm->pgd,
							 hva, &hugepage_shift);
			if (ptep) {
				pte = kvmppc_read_update_linux_pte(ptep, 1,
							   hugepage_shift);
				if (pte_write(pte))
					write_ok = 1;
			}
			rcu_read_unlock_sched();
		}
	}

705 ret = -EFAULT;
706 if (psize > pte_size)
707 goto out_put;
708
709 /* Check WIMG vs. the actual page we're accessing */
710 if (!hpte_cache_flags_ok(r, is_io)) {
711 if (is_io)
712 return -EFAULT;
713 /*
714 * Allow guest to map emulated device memory as
715 * uncacheable, but actually make it cacheable.
716 */
717 r = (r & ~(HPTE_R_W|HPTE_R_I|HPTE_R_G)) | HPTE_R_M;
718 }
719
Paul Mackerrascaaa4c82013-11-16 17:46:02 +1100720 /*
721 * Set the HPTE to point to pfn.
722 * Since the pfn is at PAGE_SIZE granularity, make sure we
723 * don't mask out lower-order bits if psize < PAGE_SIZE.
724 */
725 if (psize < PAGE_SIZE)
726 psize = PAGE_SIZE;
727 r = (r & ~(HPTE_R_PP0 - psize)) | ((pfn << PAGE_SHIFT) & ~(psize - 1));
Paul Mackerras4cf302b2011-12-12 12:38:51 +0000728 if (hpte_is_writable(r) && !write_ok)
729 r = hpte_make_readonly(r);
Paul Mackerras342d3db2011-12-12 12:38:05 +0000730 ret = RESUME_GUEST;
731 preempt_disable();
732 while (!try_lock_hpte(hptep, HPTE_V_HVLOCK))
733 cpu_relax();
734 if ((hptep[0] & ~HPTE_V_HVLOCK) != hpte[0] || hptep[1] != hpte[1] ||
735 rev->guest_rpte != hpte[2])
736 /* HPTE has been changed under us; let the guest retry */
737 goto out_unlock;
738 hpte[0] = (hpte[0] & ~HPTE_V_ABSENT) | HPTE_V_VALID;
739
Paul Mackerras1066f772014-05-26 19:48:37 +1000740 /* Always put the HPTE in the rmap chain for the page base address */
741 rmap = &memslot->arch.rmap[gfn_base - memslot->base_gfn];
Paul Mackerras342d3db2011-12-12 12:38:05 +0000742 lock_rmap(rmap);
743
744 /* Check if we might have been invalidated; let the guest retry if so */
745 ret = RESUME_GUEST;
Christoffer Dall8ca40a72012-10-14 23:10:18 -0400746 if (mmu_notifier_retry(vcpu->kvm, mmu_seq)) {
Paul Mackerras342d3db2011-12-12 12:38:05 +0000747 unlock_rmap(rmap);
748 goto out_unlock;
749 }
Paul Mackerras4cf302b2011-12-12 12:38:51 +0000750
Paul Mackerrasbad3b502011-12-15 02:02:02 +0000751 /* Only set R/C in real HPTE if set in both *rmap and guest_rpte */
752 rcbits = *rmap >> KVMPPC_RMAP_RC_SHIFT;
753 r &= rcbits | ~(HPTE_R_R | HPTE_R_C);
754
Paul Mackerras4cf302b2011-12-12 12:38:51 +0000755 if (hptep[0] & HPTE_V_VALID) {
756 /* HPTE was previously valid, so we need to invalidate it */
757 unlock_rmap(rmap);
758 hptep[0] |= HPTE_V_ABSENT;
759 kvmppc_invalidate_hpte(kvm, hptep, index);
Paul Mackerrasbad3b502011-12-15 02:02:02 +0000760 /* don't lose previous R and C bits */
761 r |= hptep[1] & (HPTE_R_R | HPTE_R_C);
Paul Mackerras4cf302b2011-12-12 12:38:51 +0000762 } else {
763 kvmppc_add_revmap_chain(kvm, rev, rmap, index, 0);
764 }
Paul Mackerras342d3db2011-12-12 12:38:05 +0000765
766 hptep[1] = r;
767 eieio();
768 hptep[0] = hpte[0];
769 asm volatile("ptesync" : : : "memory");
770 preempt_enable();
Paul Mackerras4cf302b2011-12-12 12:38:51 +0000771 if (page && hpte_is_writable(r))
Paul Mackerras342d3db2011-12-12 12:38:05 +0000772 SetPageDirty(page);
773
774 out_put:
David Gibsonde6c0b02012-05-08 20:24:08 +1000775 if (page) {
776 /*
777 * We drop pages[0] here, not page because page might
778 * have been set to the head page of a compound, but
779 * we have to drop the reference on the correct tail
780 * page to match the get inside gup()
781 */
782 put_page(pages[0]);
783 }
Paul Mackerras342d3db2011-12-12 12:38:05 +0000784 return ret;
785
786 out_unlock:
787 hptep[0] &= ~HPTE_V_HVLOCK;
788 preempt_enable();
789 goto out_put;
790}
791
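/*
 * Ordering note (illustrative summary of the update protocol in
 * kvmppc_book3s_hv_page_fault() above): an HPTE is modified by
 * (1) taking the per-entry lock bit (HPTE_V_HVLOCK) in dword 0,
 * (2) if it was valid, marking it absent and issuing a tlbie via
 * kvmppc_invalidate_hpte(), (3) writing dword 1, (4) an eieio
 * barrier, then (5) writing dword 0, which sets HPTE_V_VALID and
 * drops the lock in one store; ptesync then makes the update visible
 * to the hardware page-table walker.
 */
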
static void kvmppc_rmap_reset(struct kvm *kvm)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	slots = kvm->memslots;
	kvm_for_each_memslot(memslot, slots) {
		/*
		 * This assumes it is acceptable to lose reference and
		 * change bits across a reset.
		 */
		memset(memslot->arch.rmap, 0,
		       memslot->npages * sizeof(*memslot->arch.rmap));
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

static int kvm_handle_hva_range(struct kvm *kvm,
				unsigned long start,
				unsigned long end,
				int (*handler)(struct kvm *kvm,
					       unsigned long *rmapp,
					       unsigned long gfn))
{
	int ret;
	int retval = 0;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);

		for (; gfn < gfn_end; ++gfn) {
			gfn_t gfn_offset = gfn - memslot->base_gfn;

			ret = handler(kvm, &memslot->arch.rmap[gfn_offset], gfn);
			retval |= ret;
		}
	}

	return retval;
}

static int kvm_handle_hva(struct kvm *kvm, unsigned long hva,
			  int (*handler)(struct kvm *kvm, unsigned long *rmapp,
					 unsigned long gfn))
{
	return kvm_handle_hva_range(kvm, hva, hva + 1, handler);
}

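/*
 * Worked example for kvm_handle_hva_range() above (illustrative,
 * assuming 4kB pages): for a memslot with userspace_addr 0x10000000,
 * an hva range of [0x10003000, 0x10005000) yields
 * gfn = base_gfn + 3 as the first gfn and gfn_end =
 * hva_to_gfn_memslot(0x10005fff) = base_gfn + 5, so the handler runs
 * for base_gfn + 3 and base_gfn + 4.
 */
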
static int kvm_unmap_rmapp(struct kvm *kvm, unsigned long *rmapp,
			   unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long h, i, j;
	unsigned long *hptep;
	unsigned long ptel, psize, rcbits;

	for (;;) {
		lock_rmap(rmapp);
		if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
			unlock_rmap(rmapp);
			break;
		}

		/*
		 * To avoid an ABBA deadlock with the HPTE lock bit,
		 * we can't spin on the HPTE lock while holding the
		 * rmap chain lock.
		 */
		i = *rmapp & KVMPPC_RMAP_INDEX;
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			continue;
		}
		j = rev[i].forw;
		if (j == i) {
			/* chain is now empty */
			*rmapp &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
		} else {
			/* remove i from chain */
			h = rev[i].back;
			rev[h].forw = j;
			rev[j].back = h;
			rev[i].forw = rev[i].back = i;
			*rmapp = (*rmapp & ~KVMPPC_RMAP_INDEX) | j;
		}

		/* Now check and modify the HPTE */
		ptel = rev[i].guest_rpte;
		psize = hpte_page_size(hptep[0], ptel);
		if ((hptep[0] & HPTE_V_VALID) &&
		    hpte_rpn(ptel, psize) == gfn) {
			if (kvm->arch.using_mmu_notifiers)
				hptep[0] |= HPTE_V_ABSENT;
			kvmppc_invalidate_hpte(kvm, hptep, i);
			/* Harvest R and C */
			rcbits = hptep[1] & (HPTE_R_R | HPTE_R_C);
			*rmapp |= rcbits << KVMPPC_RMAP_RC_SHIFT;
			if (rcbits & ~rev[i].guest_rpte) {
				rev[i].guest_rpte = ptel | rcbits;
				note_hpte_modification(kvm, &rev[i]);
			}
		}
		unlock_rmap(rmapp);
		hptep[0] &= ~HPTE_V_HVLOCK;
	}
	return 0;
}

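/*
 * Structure note (illustrative): each memslot rmap word packs a lock
 * bit, the KVMPPC_RMAP_PRESENT/REFERENCED/CHANGED flags and, in
 * KVMPPC_RMAP_INDEX, the HPT index of one HPTE mapping that gfn;
 * rev[i].forw and rev[i].back then link all such HPTEs into a
 * circular doubly-linked list, which is why removing entry i above
 * takes the classic unlink form rev[h].forw = j; rev[j].back = h.
 */
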
int kvm_unmap_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
	return 0;
}

int kvm_unmap_hva_range_hv(struct kvm *kvm, unsigned long start, unsigned long end)
{
	if (kvm->arch.using_mmu_notifiers)
		kvm_handle_hva_range(kvm, start, end, kvm_unmap_rmapp);
	return 0;
}

void kvmppc_core_flush_memslot_hv(struct kvm *kvm,
				  struct kvm_memory_slot *memslot)
{
	unsigned long *rmapp;
	unsigned long gfn;
	unsigned long n;

	rmapp = memslot->arch.rmap;
	gfn = memslot->base_gfn;
	for (n = memslot->npages; n; --n) {
		/*
		 * Testing the present bit without locking is OK because
		 * the memslot has been marked invalid already, and hence
		 * no new HPTEs referencing this page can be created,
		 * thus the present bit can't go from 0 to 1.
		 */
		if (*rmapp & KVMPPC_RMAP_PRESENT)
			kvm_unmap_rmapp(kvm, rmapp, gfn);
		++rmapp;
		++gfn;
	}
}

static int kvm_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			 unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hptep;
	int ret = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED) {
		*rmapp &= ~KVMPPC_RMAP_REFERENCED;
		ret = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return ret;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/* If this HPTE isn't referenced, ignore it */
		if (!(hptep[1] & HPTE_R_R))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if ((hptep[0] & HPTE_V_VALID) && (hptep[1] & HPTE_R_R)) {
			kvmppc_clear_ref_hpte(kvm, hptep, i);
			if (!(rev[i].guest_rpte & HPTE_R_R)) {
				rev[i].guest_rpte |= HPTE_R_R;
				note_hpte_modification(kvm, &rev[i]);
			}
			ret = 1;
		}
		hptep[0] &= ~HPTE_V_HVLOCK;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return ret;
}

int kvm_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_age_rmapp);
}

static int kvm_test_age_rmapp(struct kvm *kvm, unsigned long *rmapp,
			      unsigned long gfn)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long *hp;
	int ret = 1;

	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		return 1;

	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_REFERENCED)
		goto out;

	if (*rmapp & KVMPPC_RMAP_PRESENT) {
		i = head = *rmapp & KVMPPC_RMAP_INDEX;
		do {
			hp = (unsigned long *)(kvm->arch.hpt_virt + (i << 4));
			j = rev[i].forw;
			if (hp[1] & HPTE_R_R)
				goto out;
		} while ((i = j) != head);
	}
	ret = 0;

 out:
	unlock_rmap(rmapp);
	return ret;
}

int kvm_test_age_hva_hv(struct kvm *kvm, unsigned long hva)
{
	if (!kvm->arch.using_mmu_notifiers)
		return 0;
	return kvm_handle_hva(kvm, hva, kvm_test_age_rmapp);
}

void kvm_set_spte_hva_hv(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	if (!kvm->arch.using_mmu_notifiers)
		return;
	kvm_handle_hva(kvm, hva, kvm_unmap_rmapp);
}

static int vcpus_running(struct kvm *kvm)
{
	return atomic_read(&kvm->arch.vcpus_running) != 0;
}

/*
 * Returns the number of system pages that are dirty.
 * This can be more than 1 if we find a huge-page HPTE.
 */
static int kvm_test_clear_dirty_npages(struct kvm *kvm, unsigned long *rmapp)
{
	struct revmap_entry *rev = kvm->arch.revmap;
	unsigned long head, i, j;
	unsigned long n;
	unsigned long v, r;
	unsigned long *hptep;
	int npages_dirty = 0;

 retry:
	lock_rmap(rmapp);
	if (*rmapp & KVMPPC_RMAP_CHANGED) {
		*rmapp &= ~KVMPPC_RMAP_CHANGED;
		npages_dirty = 1;
	}
	if (!(*rmapp & KVMPPC_RMAP_PRESENT)) {
		unlock_rmap(rmapp);
		return npages_dirty;
	}

	i = head = *rmapp & KVMPPC_RMAP_INDEX;
	do {
		hptep = (unsigned long *) (kvm->arch.hpt_virt + (i << 4));
		j = rev[i].forw;

		/*
		 * Checking the C (changed) bit here is racy since there
		 * is no guarantee about when the hardware writes it back.
		 * If the HPTE is not writable then it is stable since the
		 * page can't be written to, and we would have done a tlbie
		 * (which forces the hardware to complete any writeback)
		 * when making the HPTE read-only.
		 * If vcpus are running then this call is racy anyway
		 * since the page could get dirtied subsequently, so we
		 * expect there to be a further call which would pick up
		 * any delayed C bit writeback.
		 * Otherwise we need to do the tlbie even if C==0 in
		 * order to pick up any delayed writeback of C.
		 */
		if (!(hptep[1] & HPTE_R_C) &&
		    (!hpte_is_writable(hptep[1]) || vcpus_running(kvm)))
			continue;

		if (!try_lock_hpte(hptep, HPTE_V_HVLOCK)) {
			/* unlock rmap before spinning on the HPTE lock */
			unlock_rmap(rmapp);
			while (hptep[0] & HPTE_V_HVLOCK)
				cpu_relax();
			goto retry;
		}

		/* Now check and modify the HPTE */
		if (!(hptep[0] & HPTE_V_VALID))
			continue;

		/* need to make it temporarily absent so C is stable */
		hptep[0] |= HPTE_V_ABSENT;
		kvmppc_invalidate_hpte(kvm, hptep, i);
		v = hptep[0];
		r = hptep[1];
		if (r & HPTE_R_C) {
			hptep[1] = r & ~HPTE_R_C;
			if (!(rev[i].guest_rpte & HPTE_R_C)) {
				rev[i].guest_rpte |= HPTE_R_C;
				note_hpte_modification(kvm, &rev[i]);
			}
			n = hpte_page_size(v, r);
			n = (n + PAGE_SIZE - 1) >> PAGE_SHIFT;
			if (n > npages_dirty)
				npages_dirty = n;
			eieio();
		}
		v &= ~(HPTE_V_ABSENT | HPTE_V_HVLOCK);
		v |= HPTE_V_VALID;
		hptep[0] = v;
	} while ((i = j) != head);

	unlock_rmap(rmapp);
	return npages_dirty;
}

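/*
 * Worked example (illustrative, assuming 4kB system pages): a dirty
 * 64kB HPTE gives hpte_page_size() = 0x10000, so npages_dirty is
 * rounded up to 16 and the caller below marks 16 consecutive bits in
 * the dirty bitmap, all accounted to the rmap slot of the page base
 * address.
 */
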
static void harvest_vpa_dirty(struct kvmppc_vpa *vpa,
			      struct kvm_memory_slot *memslot,
			      unsigned long *map)
{
	unsigned long gfn;

	if (!vpa->dirty || !vpa->pinned_addr)
		return;
	gfn = vpa->gpa >> PAGE_SHIFT;
	if (gfn < memslot->base_gfn ||
	    gfn >= memslot->base_gfn + memslot->npages)
		return;

	vpa->dirty = false;
	if (map)
		__set_bit_le(gfn - memslot->base_gfn, map);
}

long kvmppc_hv_get_dirty_log(struct kvm *kvm, struct kvm_memory_slot *memslot,
			     unsigned long *map)
{
	unsigned long i, j;
	unsigned long *rmapp;
	struct kvm_vcpu *vcpu;

	preempt_disable();
	rmapp = memslot->arch.rmap;
	for (i = 0; i < memslot->npages; ++i) {
		int npages = kvm_test_clear_dirty_npages(kvm, rmapp);
		/*
		 * Note that if npages > 0 then i must be a multiple of npages,
		 * since we always put huge-page HPTEs in the rmap chain
		 * corresponding to their page base address.
		 */
		if (npages && map)
			for (j = i; npages; ++j, --npages)
				__set_bit_le(j, map);
		++rmapp;
	}

	/* Harvest dirty bits from VPA and DTL updates */
	/* Note: we never modify the SLB shadow buffer areas */
	kvm_for_each_vcpu(i, vcpu, kvm) {
		spin_lock(&vcpu->arch.vpa_update_lock);
		harvest_vpa_dirty(&vcpu->arch.vpa, memslot, map);
		harvest_vpa_dirty(&vcpu->arch.dtl, memslot, map);
		spin_unlock(&vcpu->arch.vpa_update_lock);
	}
	preempt_enable();
	return 0;
}

void *kvmppc_pin_guest_page(struct kvm *kvm, unsigned long gpa,
			    unsigned long *nb_ret)
{
	struct kvm_memory_slot *memslot;
	unsigned long gfn = gpa >> PAGE_SHIFT;
	struct page *page, *pages[1];
	int npages;
	unsigned long hva, offset;
	unsigned long pa;
	unsigned long *physp;
	int srcu_idx;

	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
		goto err;
	if (!kvm->arch.using_mmu_notifiers) {
		physp = memslot->arch.slot_phys;
		if (!physp)
			goto err;
		physp += gfn - memslot->base_gfn;
		pa = *physp;
		if (!pa) {
			if (kvmppc_get_guest_page(kvm, gfn, memslot,
						  PAGE_SIZE) < 0)
				goto err;
			pa = *physp;
		}
		page = pfn_to_page(pa >> PAGE_SHIFT);
		get_page(page);
	} else {
		hva = gfn_to_hva_memslot(memslot, gfn);
		npages = get_user_pages_fast(hva, 1, 1, pages);
		if (npages < 1)
			goto err;
		page = pages[0];
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);

	offset = gpa & (PAGE_SIZE - 1);
	if (nb_ret)
		*nb_ret = PAGE_SIZE - offset;
	return page_address(page) + offset;

 err:
	srcu_read_unlock(&kvm->srcu, srcu_idx);
	return NULL;
}

void kvmppc_unpin_guest_page(struct kvm *kvm, void *va, unsigned long gpa,
			     bool dirty)
{
	struct page *page = virt_to_page(va);
	struct kvm_memory_slot *memslot;
	unsigned long gfn;
	unsigned long *rmap;
	int srcu_idx;

	put_page(page);

	if (!dirty || !kvm->arch.using_mmu_notifiers)
		return;

	/* We need to mark this page dirty in the rmap chain */
	gfn = gpa >> PAGE_SHIFT;
	srcu_idx = srcu_read_lock(&kvm->srcu);
	memslot = gfn_to_memslot(kvm, gfn);
	if (memslot) {
		rmap = &memslot->arch.rmap[gfn - memslot->base_gfn];
		lock_rmap(rmap);
		*rmap |= KVMPPC_RMAP_CHANGED;
		unlock_rmap(rmap);
	}
	srcu_read_unlock(&kvm->srcu, srcu_idx);
}

/*
 * Functions for reading and writing the hash table via reads and
 * writes on a file descriptor.
 *
 * Reads return the guest view of the hash table, which has to be
 * pieced together from the real hash table and the guest_rpte
 * values in the revmap array.
 *
 * On writes, each HPTE written is considered in turn, and if it
 * is valid, it is written to the HPT as if an H_ENTER with the
 * exact flag set was done.  When the invalid count is non-zero
 * in the header written to the stream, the kernel will make
 * sure that that many HPTEs are invalid, and invalidate them
 * if not.
 */

struct kvm_htab_ctx {
	unsigned long	index;
	unsigned long	flags;
	struct kvm	*kvm;
	int		first_pass;
};

#define HPTE_SIZE	(2 * sizeof(unsigned long))

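/*
 * Stream format sketch (illustrative; the header layout is assumed
 * to match struct kvm_get_htab_header in the uapi headers):
 *
 *	+-------+---------+-----------+-------------------------
 *	| index | n_valid | n_invalid | n_valid HPTEs of
 *	| (u32) | (u16)   | (u16)     | HPTE_SIZE bytes each ...
 *	+-------+---------+-----------+-------------------------
 *
 * followed by the next header; the n_invalid entries after the valid
 * ones are represented only by the count.
 */
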
/*
 * Returns 1 if this HPT entry has been modified or has pending
 * R/C bit changes.
 */
static int hpte_dirty(struct revmap_entry *revp, unsigned long *hptp)
{
	unsigned long rcbits_unset;

	if (revp->guest_rpte & HPTE_GR_MODIFIED)
		return 1;

	/* Also need to consider changes in reference and changed bits */
	rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
	if ((hptp[0] & HPTE_V_VALID) && (hptp[1] & rcbits_unset))
		return 1;

	return 0;
}

static long record_hpte(unsigned long flags, unsigned long *hptp,
			unsigned long *hpte, struct revmap_entry *revp,
			int want_valid, int first_pass)
{
	unsigned long v, r;
	unsigned long rcbits_unset;
	int ok = 1;
	int valid, dirty;

	/* Unmodified entries are uninteresting except on the first pass */
	dirty = hpte_dirty(revp, hptp);
	if (!first_pass && !dirty)
		return 0;

	valid = 0;
	if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT)) {
		valid = 1;
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) &&
		    !(hptp[0] & HPTE_V_BOLTED))
			valid = 0;
	}
	if (valid != want_valid)
		return 0;

	v = r = 0;
	if (valid || dirty) {
		/* lock the HPTE so it's stable and read it */
		preempt_disable();
		while (!try_lock_hpte(hptp, HPTE_V_HVLOCK))
			cpu_relax();
		v = hptp[0];

		/* re-evaluate valid and dirty from synchronized HPTE value */
		valid = !!(v & HPTE_V_VALID);
		dirty = !!(revp->guest_rpte & HPTE_GR_MODIFIED);

		/* Harvest R and C into guest view if necessary */
		rcbits_unset = ~revp->guest_rpte & (HPTE_R_R | HPTE_R_C);
		if (valid && (rcbits_unset & hptp[1])) {
			revp->guest_rpte |= (hptp[1] & (HPTE_R_R | HPTE_R_C)) |
				HPTE_GR_MODIFIED;
			dirty = 1;
		}

		if (v & HPTE_V_ABSENT) {
			v &= ~HPTE_V_ABSENT;
			v |= HPTE_V_VALID;
			valid = 1;
		}
		if ((flags & KVM_GET_HTAB_BOLTED_ONLY) && !(v & HPTE_V_BOLTED))
			valid = 0;

		r = revp->guest_rpte;
		/* only clear modified if this is the right sort of entry */
		if (valid == want_valid && dirty) {
			r &= ~HPTE_GR_MODIFIED;
			revp->guest_rpte = r;
		}
		asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
		hptp[0] &= ~HPTE_V_HVLOCK;
		preempt_enable();
		if (!(valid == want_valid && (first_pass || dirty)))
			ok = 0;
	}
	hpte[0] = v;
	hpte[1] = r;
	return ok;
}

static ssize_t kvm_htab_read(struct file *file, char __user *buf,
			     size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long *hptp;
	struct revmap_entry *revp;
	unsigned long i, nb, nw;
	unsigned long __user *lbuf;
	struct kvm_get_htab_header __user *hptr;
	unsigned long flags;
	int first_pass;
	unsigned long hpte[2];

	if (!access_ok(VERIFY_WRITE, buf, count))
		return -EFAULT;

	first_pass = ctx->first_pass;
	flags = ctx->flags;

	i = ctx->index;
	hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
	revp = kvm->arch.revmap + i;
	lbuf = (unsigned long __user *)buf;

	nb = 0;
	while (nb + sizeof(hdr) + HPTE_SIZE < count) {
		/* Initialize header */
		hptr = (struct kvm_get_htab_header __user *)buf;
		hdr.n_valid = 0;
		hdr.n_invalid = 0;
		nw = nb;	/* remember where this header starts */
		nb += sizeof(hdr);
		lbuf = (unsigned long __user *)(buf + sizeof(hdr));

		/* Skip uninteresting entries, i.e. clean on not-first pass */
		if (!first_pass) {
			while (i < kvm->arch.hpt_npte &&
			       !hpte_dirty(revp, hptp)) {
				++i;
				hptp += 2;
				++revp;
			}
		}
		hdr.index = i;

		/* Grab a series of valid entries */
		while (i < kvm->arch.hpt_npte &&
		       hdr.n_valid < 0xffff &&
		       nb + HPTE_SIZE < count &&
		       record_hpte(flags, hptp, hpte, revp, 1, first_pass)) {
			/* valid entry, write it out */
			++hdr.n_valid;
			if (__put_user(hpte[0], lbuf) ||
			    __put_user(hpte[1], lbuf + 1))
				return -EFAULT;
			nb += HPTE_SIZE;
			lbuf += 2;
			++i;
			hptp += 2;
			++revp;
		}
		/* Now skip invalid entries while we can */
		while (i < kvm->arch.hpt_npte &&
		       hdr.n_invalid < 0xffff &&
		       record_hpte(flags, hptp, hpte, revp, 0, first_pass)) {
			/* found an invalid entry */
			++hdr.n_invalid;
			++i;
			hptp += 2;
			++revp;
		}

		if (hdr.n_valid || hdr.n_invalid) {
			/* write back the header */
			if (__copy_to_user(hptr, &hdr, sizeof(hdr)))
				return -EFAULT;
			nw = nb;
			buf = (char __user *)lbuf;
		} else {
			/* header was empty; back it out of the byte count */
			nb = nw;
		}

		/* Check if we've wrapped around the hash table */
		if (i >= kvm->arch.hpt_npte) {
			i = 0;
			ctx->first_pass = 0;
			break;
		}
	}

	ctx->index = i;

	return nb;
}

static ssize_t kvm_htab_write(struct file *file, const char __user *buf,
			      size_t count, loff_t *ppos)
{
	struct kvm_htab_ctx *ctx = file->private_data;
	struct kvm *kvm = ctx->kvm;
	struct kvm_get_htab_header hdr;
	unsigned long i, j;
	unsigned long v, r;
	unsigned long __user *lbuf;
	unsigned long *hptp;
	unsigned long tmp[2];	/* scratch for values returned by the hcalls */
	ssize_t nb;
	long int err, ret;
	int rma_setup;

	if (!access_ok(VERIFY_READ, buf, count))
		return -EFAULT;

	/* lock out vcpus from running while we're doing this */
	mutex_lock(&kvm->lock);
	rma_setup = kvm->arch.rma_setup_done;
	if (rma_setup) {
		kvm->arch.rma_setup_done = 0;	/* temporarily */
		/* order rma_setup_done vs. vcpus_running */
		smp_mb();
		if (atomic_read(&kvm->arch.vcpus_running)) {
			kvm->arch.rma_setup_done = 1;
			mutex_unlock(&kvm->lock);
			return -EBUSY;
		}
	}

	err = 0;
	for (nb = 0; nb + sizeof(hdr) <= count; ) {
		err = -EFAULT;
		if (__copy_from_user(&hdr, buf, sizeof(hdr)))
			break;

		err = 0;
		if (nb + hdr.n_valid * HPTE_SIZE > count)
			break;

		nb += sizeof(hdr);
		buf += sizeof(hdr);

		err = -EINVAL;
		i = hdr.index;
		if (i >= kvm->arch.hpt_npte ||
		    i + hdr.n_valid + hdr.n_invalid > kvm->arch.hpt_npte)
			break;

		hptp = (unsigned long *)(kvm->arch.hpt_virt + (i * HPTE_SIZE));
		lbuf = (unsigned long __user *)buf;
		for (j = 0; j < hdr.n_valid; ++j) {
			err = -EFAULT;
			if (__get_user(v, lbuf) || __get_user(r, lbuf + 1))
				goto out;
			err = -EINVAL;
			if (!(v & HPTE_V_VALID))
				goto out;
			lbuf += 2;
			nb += HPTE_SIZE;

			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			err = -EIO;
			ret = kvmppc_virtmode_do_h_enter(kvm, H_EXACT, i, v, r,
							 tmp);
			if (ret != H_SUCCESS) {
				pr_err("kvm_htab_write ret %ld i=%ld v=%lx "
				       "r=%lx\n", ret, i, v, r);
				goto out;
			}
			if (!rma_setup && is_vrma_hpte(v)) {
				unsigned long psize = hpte_base_page_size(v, r);
				unsigned long senc = slb_pgsize_encoding(psize);
				unsigned long lpcr;

				kvm->arch.vrma_slb_v = senc | SLB_VSID_B_1T |
					(VRMA_VSID << SLB_VSID_SHIFT_1T);
				lpcr = senc << (LPCR_VRMASD_SH - 4);
				kvmppc_update_lpcr(kvm, lpcr, LPCR_VRMASD);
				rma_setup = 1;
			}
			++i;
			hptp += 2;
		}

		for (j = 0; j < hdr.n_invalid; ++j) {
			if (hptp[0] & (HPTE_V_VALID | HPTE_V_ABSENT))
				kvmppc_do_h_remove(kvm, 0, i, 0, tmp);
			++i;
			hptp += 2;
		}
		err = 0;
	}

 out:
	/* Order HPTE updates vs. rma_setup_done */
	smp_wmb();
	kvm->arch.rma_setup_done = rma_setup;
	mutex_unlock(&kvm->lock);

	if (err)
		return err;
	return nb;
}

static int kvm_htab_release(struct inode *inode, struct file *filp)
{
	struct kvm_htab_ctx *ctx = filp->private_data;

	filp->private_data = NULL;
	if (!(ctx->flags & KVM_GET_HTAB_WRITE))
		atomic_dec(&ctx->kvm->arch.hpte_mod_interest);
	kvm_put_kvm(ctx->kvm);
	kfree(ctx);
	return 0;
}

static const struct file_operations kvm_htab_fops = {
	.read		= kvm_htab_read,
	.write		= kvm_htab_write,
	.llseek		= default_llseek,
	.release	= kvm_htab_release,
};

int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *ghf)
{
	int ret;
	struct kvm_htab_ctx *ctx;
	int rwflag;

	/* reject flags we don't recognize */
	if (ghf->flags & ~(KVM_GET_HTAB_BOLTED_ONLY | KVM_GET_HTAB_WRITE))
		return -EINVAL;
	ctx = kzalloc(sizeof(*ctx), GFP_KERNEL);
	if (!ctx)
		return -ENOMEM;
	kvm_get_kvm(kvm);
	ctx->kvm = kvm;
	ctx->index = ghf->start_index;
	ctx->flags = ghf->flags;
	ctx->first_pass = 1;

	rwflag = (ghf->flags & KVM_GET_HTAB_WRITE) ? O_WRONLY : O_RDONLY;
	ret = anon_inode_getfd("kvm-htab", &kvm_htab_fops, ctx, rwflag | O_CLOEXEC);
	if (ret < 0) {
		kfree(ctx);	/* nothing else owns ctx on this path */
		kvm_put_kvm(kvm);
		return ret;
	}

	if (rwflag == O_RDONLY) {
		mutex_lock(&kvm->slots_lock);
		atomic_inc(&kvm->arch.hpte_mod_interest);
		/* make sure kvmppc_do_h_enter etc. see the increment */
		synchronize_srcu_expedited(&kvm->srcu);
		mutex_unlock(&kvm->slots_lock);
	}

	return ret;
}

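/*
 * Illustrative userspace flow for the ioctl above (a sketch, not
 * kernel code; "vmfd" and the missing error handling are assumptions,
 * and the authoritative description of KVM_PPC_GET_HTAB_FD is in the
 * KVM API documentation):
 *
 *	struct kvm_get_htab_fd ghf = {
 *		.flags = 0,		// dump the guest HPT
 *		.start_index = 0,
 *	};
 *	int fd = ioctl(vmfd, KVM_PPC_GET_HTAB_FD, &ghf);
 *
 * read() on the resulting fd then streams headers and valid HPTEs as
 * described above; passing KVM_GET_HTAB_WRITE in ghf.flags instead
 * yields a write-only fd for restoring the table on the destination
 * of a migration.
 */
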
void kvmppc_mmu_book3s_hv_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_mmu *mmu = &vcpu->arch.mmu;

	if (cpu_has_feature(CPU_FTR_ARCH_206))
		vcpu->arch.slb_nr = 32;		/* POWER7 */
	else
		vcpu->arch.slb_nr = 64;		/* PPC970 */

	mmu->xlate = kvmppc_mmu_book3s_64_hv_xlate;
	mmu->reset_msr = kvmppc_mmu_book3s_64_hv_reset_msr;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SLB;
}