/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
                                                      gfn_t gfn)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                    gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        return NULL;
}

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;

        p = find_linux_pte(swapper_pg_dir, addr);
        if (!p || !pte_present(*p))
                return NULL;
        /* assume we don't have huge pages in vmalloc space... */
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}

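/*
 * Real-mode handler for the H_ENTER hcall: validate the page size
 * encoded in the HPTE, translate the guest real address through the
 * memslot's slot_phys array, then find and lock a free slot in the
 * target PTEG and install the HPTE.  The index of the entry actually
 * used is returned to the guest in GPR4.
 */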
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        unsigned long porder;
        struct kvm *kvm = vcpu->kvm;
        unsigned long i, gfn, lpn, pa;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel = ptel;
        struct kvm_memory_slot *memslot;
        unsigned long *physp;

        /* only handle 4k, 64k and 16M pages for now */
        porder = 12;
        if (pteh & HPTE_V_LARGE) {
                if (cpu_has_feature(CPU_FTR_ARCH_206) &&
                    (ptel & 0xf000) == 0x1000) {
                        /* 64k page */
                        porder = 16;
                } else if ((ptel & 0xff000) == 0) {
                        /* 16M page */
                        porder = 24;
                        /* lowest AVA bit must be 0 for 16M pages */
                        if (pteh & 0x80)
                                return H_PARAMETER;
                } else
                        return H_PARAMETER;
        }
        if (porder > kvm->arch.ram_porder)
                return H_PARAMETER;

        gfn = ((ptel & HPTE_R_RPN) & ~((1ul << porder) - 1)) >> PAGE_SHIFT;
        memslot = builtin_gfn_to_memslot(kvm, gfn);
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)))
                return H_PARAMETER;
        physp = kvm->arch.slot_phys[memslot->id];
        if (!physp)
                return H_PARAMETER;

        lpn = (gfn - memslot->base_gfn) >> (kvm->arch.ram_porder - PAGE_SHIFT);
        physp = real_vmalloc_addr(physp + lpn);
        pa = *physp;
        if (!pa)
                return H_PARAMETER;
        pa &= PAGE_MASK;

        /* Check WIMG */
        if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
            (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
                return H_PARAMETER;
        pteh &= ~0x60UL;
        ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
        ptel |= pa;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((*hpte & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
                                break;
                        hpte += 2;
                }
                if (i == 8) {
                        /*
                         * Since try_lock_hpte doesn't retry (not even stdcx.
                         * failures), it could be that there is a free slot
                         * but we transiently failed to lock it.  Try again,
                         * actually locking each slot and checking it.
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
                                if ((*hpte & HPTE_V_VALID) == 0)
                                        break;
                                *hpte &= ~HPTE_V_HVLOCK;
                                hpte += 2;
                        }
                        if (i == 8)
                                return H_PTEG_FULL;
                }
                pte_index += i;
        } else {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID)) {
                        /* Lock the slot and check again */
                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
                        if (*hpte & HPTE_V_VALID) {
                                *hpte &= ~HPTE_V_HVLOCK;
                                return H_PTEG_FULL;
                        }
                }
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev)
                rev->guest_rpte = g_ptel;
        hpte[1] = ptel;
        eieio();
        hpte[0] = pteh;
        asm volatile("ptesync" : : : "memory");
        vcpu->arch.gpr[4] = pte_index;
        return H_SUCCESS;
}

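/*
 * Try to acquire kvm->arch.tlbie_lock in real mode, using the paca's
 * lock token as the owner value.  The lwarx/stwcx. loop retries on a
 * lost reservation but bails out as soon as it sees the lock held;
 * callers spin with cpu_relax() until it succeeds.  This serializes
 * global tlbie invalidations issued on behalf of this guest.
 */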
#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx   %1,0,%2\n"
                     "  cmpwi   cr0,%1,0\n"
                     "  bne     2f\n"
                     "  stwcx.  %3,0,%2\n"
                     "  bne-    1b\n"
                     "  isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}

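/*
 * Real-mode handler for the H_REMOVE hcall: lock the HPTE, check it
 * against the AVPN and ANDCOND conditions, return its old contents
 * in GPR4/GPR5, then clear it and flush the translation from the TLB
 * (tlbiel when only one vcpu is online, global tlbie otherwise).
 */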
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn,
                     unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        unsigned long v, r, rb;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }
        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
        vcpu->arch.gpr[5] = r = hpte[1];
        rb = compute_tlbie_rb(v, r, pte_index);
        hpte[0] = 0;
        if (!(flags & H_LOCAL)) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                             : : "r" (rb), "r" (kvm->arch.lpid));
                asm volatile("ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile("tlbiel %0" : : "r" (rb));
                asm volatile("ptesync" : : : "memory");
        }
        return H_SUCCESS;
}

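/*
 * Real-mode handler for the H_BULK_REMOVE hcall: up to four remove
 * requests are packed into GPR4..GPR11, two registers per request
 * (flags plus PTE index, then the AVPN/ANDCOND value).  A return code
 * is written back into the high byte of each request's first word,
 * and all invalidations are flushed in one tlbie/tlbiel sequence.
 */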
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        unsigned long *hp, tlbrb[4];
        long int i, found;
        long int n_inval = 0;
        unsigned long flags, req, pte_index;
        long int local = 0;
        long int ret = H_SUCCESS;

        if (atomic_read(&kvm->online_vcpus) == 1)
                local = 1;
        for (i = 0; i < 4; ++i) {
                pte_index = args[i * 2];
                flags = pte_index >> 56;
                pte_index &= ((1ul << 56) - 1);
                req = flags >> 6;
                flags &= 3;
                if (req == 3)
                        break;
                if (req != 1 || flags == 3 ||
                    pte_index >= HPT_NPTE) {
                        /* parameter error */
                        args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
                        ret = H_PARAMETER;
                        break;
                }
                hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
                        cpu_relax();
                found = 0;
                if (hp[0] & HPTE_V_VALID) {
                        switch (flags & 3) {
                        case 0:         /* absolute */
                                found = 1;
                                break;
                        case 1:         /* andcond */
                                if (!(hp[0] & args[i * 2 + 1]))
                                        found = 1;
                                break;
                        case 2:         /* AVPN */
                                if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
                                        found = 1;
                                break;
                        }
                }
                if (!found) {
                        hp[0] &= ~HPTE_V_HVLOCK;
                        args[i * 2] = ((0x90 | flags) << 56) + pte_index;
                        continue;
                }
                /* insert R and C bits from PTE */
                flags |= (hp[1] >> 5) & 0x0c;
                args[i * 2] = ((0x80 | flags) << 56) + pte_index;
                tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
                hp[0] = 0;
        }
        if (n_inval == 0)
                return ret;

        if (!local) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                for (i = 0; i < n_inval; ++i)
                        asm volatile(PPC_TLBIE(%1,%0)
                                     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                for (i = 0; i < n_inval; ++i)
                        asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
                asm volatile("ptesync" : : : "memory");
        }
        return ret;
}

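/*
 * Real-mode handler for the H_PROTECT hcall: update the page
 * protection and storage key bits of an existing HPTE, keeping the
 * saved guest view in the revmap entry in sync.  The HPTE is made
 * invalid and the old translation flushed before the new second
 * dword is installed, so stale permissions cannot stay cached.
 */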
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }
        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        v = hpte[0];
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
        }
        r = (hpte[1] & ~mask) | bits;

        /* Update HPTE */
        rb = compute_tlbie_rb(v, r, pte_index);
        hpte[0] = v & ~HPTE_V_VALID;
        if (!(flags & H_LOCAL)) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                             : : "r" (rb), "r" (kvm->arch.lpid));
                asm volatile("ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile("tlbiel %0" : : "r" (rb));
                asm volatile("ptesync" : : : "memory");
        }
        hpte[1] = r;
        eieio();
        hpte[0] = v & ~HPTE_V_HVLOCK;
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}

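/*
 * Real-mode handler for the H_READ hcall: return one HPTE, or the
 * four HPTEs of a half-PTEG if H_READ_4 is set, in GPR4 onwards.
 * With H_R_XLATE, the second dword of a valid entry is taken from
 * the saved guest view (revmap), so the guest gets back the real
 * address it originally supplied rather than the host real address.
 */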
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        if (flags & H_R_XLATE)
                rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                r = hpte[1];
                if (hpte[0] & HPTE_V_VALID) {
                        if (rev)
                                r = rev[i].guest_rpte;
                        else
                                r = hpte[1] | HPTE_R_RPN;
                }
                vcpu->arch.gpr[4 + i * 2] = hpte[0];
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}