/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
                                                      gfn_t gfn)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                    gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        return NULL;
}

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;

        p = find_linux_pte(swapper_pg_dir, addr);
        if (!p || !pte_present(*p))
                return NULL;
        /* assume we don't have huge pages in vmalloc space... */
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}

#define HPTE_V_HVLOCK   0x40UL

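/*
 * Try to lock an HPTE by atomically setting the HPTE_V_HVLOCK software
 * bit in its first doubleword, but only if none of the bits in @bits
 * (e.g. HPTE_V_HVLOCK | HPTE_V_VALID) are already set.  Returns 1 on
 * success, 0 if the entry was busy or the store-conditional failed.
 * This runs in real mode, hence the hand-rolled ldarx/stdcx. sequence.
 */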
static inline long lock_hpte(unsigned long *hpte, unsigned long bits)
{
        unsigned long tmp, old;

        asm volatile(" ldarx %0,0,%2\n"
                     " and. %1,%0,%3\n"
                     " bne 2f\n"
                     " ori %0,%0,%4\n"
                     " stdcx. %0,0,%2\n"
                     " beq+ 2f\n"
                     " li %1,%3\n"
                     "2: isync"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (hpte), "r" (bits), "i" (HPTE_V_HVLOCK)
                     : "cc", "memory");
        return old == 0;
}

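/*
 * Real-mode handler for the H_ENTER hypercall: insert a translation
 * into the guest's hashed page table.  The guest-supplied HPTE is
 * validated (page size, WIMG bits, guest real address), its RPN is
 * replaced with the host real address taken from the memslot's
 * slot_phys[] array, and the entry is written into a free slot of the
 * addressed PTE group (or the exact slot when H_EXACT is set).  The
 * index actually used is returned to the guest in R4.
 */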
long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        unsigned long porder;
        struct kvm *kvm = vcpu->kvm;
        unsigned long i, gfn, lpn, pa;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel = ptel;
        struct kvm_memory_slot *memslot;
        unsigned long *physp;

        /* only handle 4k, 64k and 16M pages for now */
        porder = 12;
        if (pteh & HPTE_V_LARGE) {
                if (cpu_has_feature(CPU_FTR_ARCH_206) &&
                    (ptel & 0xf000) == 0x1000) {
                        /* 64k page */
                        porder = 16;
                } else if ((ptel & 0xff000) == 0) {
                        /* 16M page */
                        porder = 24;
                        /* lowest AVA bit must be 0 for 16M pages */
                        if (pteh & 0x80)
                                return H_PARAMETER;
                } else
                        return H_PARAMETER;
        }
        if (porder > kvm->arch.ram_porder)
                return H_PARAMETER;

        gfn = ((ptel & HPTE_R_RPN) & ~((1ul << porder) - 1)) >> PAGE_SHIFT;
        memslot = builtin_gfn_to_memslot(kvm, gfn);
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID)))
                return H_PARAMETER;
        physp = kvm->arch.slot_phys[memslot->id];
        if (!physp)
                return H_PARAMETER;

        lpn = (gfn - memslot->base_gfn) >> (kvm->arch.ram_porder - PAGE_SHIFT);
        physp = real_vmalloc_addr(physp + lpn);
        pa = *physp;
        if (!pa)
                return H_PARAMETER;
        pa &= PAGE_MASK;

        /* Check WIMG */
        if ((ptel & HPTE_R_WIMG) != HPTE_R_M &&
            (ptel & HPTE_R_WIMG) != (HPTE_R_W | HPTE_R_I | HPTE_R_M))
                return H_PARAMETER;
        pteh &= ~0x60UL;
        ptel &= ~(HPTE_R_PP0 - kvm->arch.ram_psize);
        ptel |= pa;
        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
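        /*
         * Without H_EXACT, search the 8 slots of the addressed PTE group
         * for one that is invalid and unlocked; with H_EXACT, the guest
         * wants this precise slot and the call fails if it is in use.
         */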
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; ; ++i) {
                        if (i == 8)
                                return H_PTEG_FULL;
                        if ((*hpte & HPTE_V_VALID) == 0 &&
                            lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
                                break;
                        hpte += 2;
                }
                pte_index += i;
        } else {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID))
                        return H_PTEG_FULL;
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev)
                rev->guest_rpte = g_ptel;
        hpte[1] = ptel;
        eieio();
        hpte[0] = pteh;
        asm volatile("ptesync" : : : "memory");
        vcpu->arch.gpr[4] = pte_index;
        return H_SUCCESS;
}

#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))

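/*
 * Try once to take the per-guest tlbie lock: store this CPU's lock
 * token into the lock word if it is currently zero.  Returns 1 if the
 * lock was taken, 0 if another CPU already holds it.
 */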
static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx %1,0,%2\n"
                     " cmpwi cr0,%1,0\n"
                     " bne 2f\n"
                     " stwcx. %3,0,%2\n"
                     " bne- 1b\n"
                     " isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}

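/*
 * Real-mode handler for the H_REMOVE hypercall: invalidate a single
 * HPTE.  The entry must be valid and satisfy the H_AVPN/H_ANDCOND
 * checks; its old contents are returned in R4/R5 and the translation
 * is flushed with tlbie, or tlbiel when only one vcpu is online.
 */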
long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn,
                     unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        unsigned long v, r, rb;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }
        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        vcpu->arch.gpr[4] = v = hpte[0] & ~HPTE_V_HVLOCK;
        vcpu->arch.gpr[5] = r = hpte[1];
        rb = compute_tlbie_rb(v, r, pte_index);
        hpte[0] = 0;
        if (!(flags & H_LOCAL)) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                             : : "r" (rb), "r" (kvm->arch.lpid));
                asm volatile("ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile("tlbiel %0" : : "r" (rb));
                asm volatile("ptesync" : : : "memory");
        }
        return H_SUCCESS;
}

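/*
 * Real-mode handler for the H_BULK_REMOVE hypercall: process up to
 * four remove requests packed into R4-R11.  Each request is matched
 * against its flags (absolute, andcond or AVPN), matching HPTEs are
 * invalidated, a status code is written back into the top byte of the
 * first word of each pair, and all the collected translations are
 * flushed in one tlbie/tlbiel sequence at the end.
 */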
long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        unsigned long *hp, tlbrb[4];
        long int i, found;
        long int n_inval = 0;
        unsigned long flags, req, pte_index;
        long int local = 0;
        long int ret = H_SUCCESS;

        if (atomic_read(&kvm->online_vcpus) == 1)
                local = 1;
        for (i = 0; i < 4; ++i) {
                pte_index = args[i * 2];
                flags = pte_index >> 56;
                pte_index &= ((1ul << 56) - 1);
                req = flags >> 6;
                flags &= 3;
                if (req == 3)
                        break;
                if (req != 1 || flags == 3 ||
                    pte_index >= HPT_NPTE) {
                        /* parameter error */
                        args[i * 2] = ((0xa0 | flags) << 56) + pte_index;
                        ret = H_PARAMETER;
                        break;
                }
                hp = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                while (!lock_hpte(hp, HPTE_V_HVLOCK))
                        cpu_relax();
                found = 0;
                if (hp[0] & HPTE_V_VALID) {
                        switch (flags & 3) {
                        case 0:         /* absolute */
                                found = 1;
                                break;
                        case 1:         /* andcond */
                                if (!(hp[0] & args[i * 2 + 1]))
                                        found = 1;
                                break;
                        case 2:         /* AVPN */
                                if ((hp[0] & ~0x7fUL) == args[i * 2 + 1])
                                        found = 1;
                                break;
                        }
                }
                if (!found) {
                        hp[0] &= ~HPTE_V_HVLOCK;
                        args[i * 2] = ((0x90 | flags) << 56) + pte_index;
                        continue;
                }
                /* insert R and C bits from PTE */
                flags |= (hp[1] >> 5) & 0x0c;
                args[i * 2] = ((0x80 | flags) << 56) + pte_index;
                tlbrb[n_inval++] = compute_tlbie_rb(hp[0], hp[1], pte_index);
                hp[0] = 0;
        }
        if (n_inval == 0)
                return ret;

        if (!local) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                for (i = 0; i < n_inval; ++i)
                        asm volatile(PPC_TLBIE(%1,%0)
                                     : : "r" (tlbrb[i]), "r" (kvm->arch.lpid));
                asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                for (i = 0; i < n_inval; ++i)
                        asm volatile("tlbiel %0" : : "r" (tlbrb[i]));
                asm volatile("ptesync" : : : "memory");
        }
        return ret;
}

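/*
 * Real-mode handler for the H_PROTECT hypercall: update the pp0/pp,
 * N and key bits of an existing HPTE.  The entry is made temporarily
 * invalid while the old translation is flushed, the guest's view of
 * the second doubleword in the reverse map is updated to match, and
 * the entry is then rewritten with the new bits.
 */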
long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & HPTE_V_VALID) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }
        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        v = hpte[0];
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
        }
        r = (hpte[1] & ~mask) | bits;

        /* Update HPTE */
        rb = compute_tlbie_rb(v, r, pte_index);
        hpte[0] = v & ~HPTE_V_VALID;
        if (!(flags & H_LOCAL)) {
                while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                        cpu_relax();
                asm volatile("ptesync" : : : "memory");
                asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                             : : "r" (rb), "r" (kvm->arch.lpid));
                asm volatile("ptesync" : : : "memory");
                kvm->arch.tlbie_lock = 0;
        } else {
                asm volatile("ptesync" : : : "memory");
                asm volatile("tlbiel %0" : : "r" (rb));
                asm volatile("ptesync" : : : "memory");
        }
        hpte[1] = r;
        eieio();
        hpte[0] = v & ~HPTE_V_HVLOCK;
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}

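/*
 * Real-mode handler for the H_READ hypercall: return one HPTE, or a
 * group of four when H_READ_4 is set, in R4 onwards.  With H_R_XLATE
 * the second doubleword of a valid entry is taken from the reverse
 * map so the guest sees its own real address instead of the host
 * physical address.
 */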
long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        if (flags & H_R_XLATE)
                rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                r = hpte[1];
                if (hpte[0] & HPTE_V_VALID) {
                        if (rev)
                                r = rev[i].guest_rpte;
                        else
                                r = hpte[1] | HPTE_R_RPN;
                }
                vcpu->arch.gpr[4 + i * 2] = hpte[0];
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}