/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * Copyright 2010-2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 */

#include <linux/types.h>
#include <linux/string.h>
#include <linux/kvm.h>
#include <linux/kvm_host.h>
#include <linux/hugetlb.h>
#include <linux/module.h>

#include <asm/tlbflush.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/hvcall.h>
#include <asm/synch.h>
#include <asm/ppc-opcode.h>

/*
 * Since this file is built in even if KVM is a module, we need
 * a local copy of this function for the case where kvm_main.c is
 * modular.
 */
static struct kvm_memory_slot *builtin_gfn_to_memslot(struct kvm *kvm,
                                                      gfn_t gfn)
{
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots)
                if (gfn >= memslot->base_gfn &&
                    gfn < memslot->base_gfn + memslot->npages)
                        return memslot;
        return NULL;
}
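
/*
 * Example (illustrative numbers only): for a memslot with
 * base_gfn = 0x1000 and npages = 0x800, a lookup of gfn 0x17ff returns
 * that slot, while gfn 0x1800 falls outside the range and yields NULL.
 */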

/* Translate address of a vmalloc'd thing to a linear map address */
static void *real_vmalloc_addr(void *x)
{
        unsigned long addr = (unsigned long) x;
        pte_t *p;

        p = find_linux_pte(swapper_pg_dir, addr);
        if (!p || !pte_present(*p))
                return NULL;
        /* assume we don't have huge pages in vmalloc space... */
        addr = (pte_pfn(*p) << PAGE_SHIFT) | (addr & ~PAGE_MASK);
        return __va(addr);
}
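
/*
 * Background note (not from the original source): linear-mapping
 * addresses can be dereferenced with the MMU off because real-mode
 * accesses on these processors ignore the top four effective-address
 * bits, whereas a vmalloc address is only meaningful through the page
 * tables - hence real_vmalloc_addr() above for data that the real-mode
 * hcall handlers in this file must touch.
 */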

/*
 * Add this HPTE into the chain for the real page.
 * Must be called with the chain locked; it unlocks the chain.
 */
void kvmppc_add_revmap_chain(struct kvm *kvm, struct revmap_entry *rev,
                             unsigned long *rmap, long pte_index, int realmode)
{
        struct revmap_entry *head, *tail;
        unsigned long i;

        if (*rmap & KVMPPC_RMAP_PRESENT) {
                i = *rmap & KVMPPC_RMAP_INDEX;
                head = &kvm->arch.revmap[i];
                if (realmode)
                        head = real_vmalloc_addr(head);
                tail = &kvm->arch.revmap[head->back];
                if (realmode)
                        tail = real_vmalloc_addr(tail);
                rev->forw = i;
                rev->back = head->back;
                tail->forw = pte_index;
                head->back = pte_index;
        } else {
                rev->forw = rev->back = pte_index;
                i = pte_index;
        }
        smp_wmb();
        *rmap = i | KVMPPC_RMAP_REFERENCED | KVMPPC_RMAP_PRESENT; /* unlock */
}
EXPORT_SYMBOL_GPL(kvmppc_add_revmap_chain);
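
/*
 * Layout sketch (derived from the code above): the reverse map is a
 * circular doubly-linked list threaded through the revmap_entry array
 * and indexed by HPTE number.  For a real page mapped by HPTEs 3 and 9,
 * with 3 inserted first:
 *
 *      *rmap index -> 3,  revmap[3] = { .forw = 9, .back = 9 }
 *                         revmap[9] = { .forw = 3, .back = 3 }
 *
 * so either entry can be unlinked in O(1), even from real mode.
 */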

/* Remove this HPTE from the chain for a real page */
static void remove_revmap_chain(struct kvm *kvm, long pte_index,
                                unsigned long hpte_v)
{
        struct revmap_entry *rev, *next, *prev;
        unsigned long gfn, ptel, head;
        struct kvm_memory_slot *memslot;
        unsigned long *rmap;

        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        ptel = rev->guest_rpte;
        gfn = hpte_rpn(ptel, hpte_page_size(hpte_v, ptel));
        memslot = builtin_gfn_to_memslot(kvm, gfn);
        if (!memslot || (memslot->flags & KVM_MEMSLOT_INVALID))
                return;

        rmap = real_vmalloc_addr(&memslot->rmap[gfn - memslot->base_gfn]);
        lock_rmap(rmap);

        head = *rmap & KVMPPC_RMAP_INDEX;
        next = real_vmalloc_addr(&kvm->arch.revmap[rev->forw]);
        prev = real_vmalloc_addr(&kvm->arch.revmap[rev->back]);
        next->back = rev->back;
        prev->forw = rev->forw;
        if (head == pte_index) {
                head = rev->forw;
                if (head == pte_index)
                        *rmap &= ~(KVMPPC_RMAP_PRESENT | KVMPPC_RMAP_INDEX);
                else
                        *rmap = (*rmap & ~KVMPPC_RMAP_INDEX) | head;
        }
        unlock_rmap(rmap);
}
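
/*
 * Note: the chain is only ever modified under the rmap lock, so a
 * concurrent kvmppc_add_revmap_chain() can never observe a half-updated
 * list; *rmap itself is only rewritten (or its PRESENT bit cleared)
 * when the entry being removed was the list head.
 */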

static pte_t lookup_linux_pte(struct kvm_vcpu *vcpu, unsigned long hva,
                              int writing, unsigned long *pte_sizep)
{
        pte_t *ptep;
        unsigned long ps = *pte_sizep;
        unsigned int shift;

        ptep = find_linux_pte_or_hugepte(vcpu->arch.pgdir, hva, &shift);
        if (!ptep)
                return __pte(0);
        if (shift)
                *pte_sizep = 1ul << shift;
        else
                *pte_sizep = PAGE_SIZE;
        if (ps > *pte_sizep)
                return __pte(0);
        if (!pte_present(*ptep))
                return __pte(0);
        return kvmppc_read_update_linux_pte(ptep, writing);
}

static inline void unlock_hpte(unsigned long *hpte, unsigned long hpte_v)
{
        asm volatile(PPC_RELEASE_BARRIER "" : : : "memory");
        hpte[0] = hpte_v;
}
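
/*
 * Note: HPTE_V_HVLOCK is a software-use bit in the first HPTE
 * doubleword; its counterpart try_lock_hpte() (in the kvm_book3s_64.h
 * header) claims it with an ldarx/stdcx. sequence.  The release barrier
 * above makes all prior updates to the HPTE visible before the
 * unlocking store of the new first doubleword.
 */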

long kvmppc_h_enter(struct kvm_vcpu *vcpu, unsigned long flags,
                    long pte_index, unsigned long pteh, unsigned long ptel)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long i, pa, gpa, gfn, psize;
        unsigned long slot_fn, hva;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long g_ptel = ptel;
        struct kvm_memory_slot *memslot;
        unsigned long *physp, pte_size;
        unsigned long is_io;
        unsigned long *rmap;
        pte_t pte;
        unsigned int writing;
        unsigned long mmu_seq;
        bool realmode = vcpu->arch.vcore->vcore_state == VCORE_RUNNING;

        psize = hpte_page_size(pteh, ptel);
        if (!psize)
                return H_PARAMETER;
        writing = hpte_is_writable(ptel);
        pteh &= ~(HPTE_V_HVLOCK | HPTE_V_ABSENT | HPTE_V_VALID);

        /* used later to detect if we might have been invalidated */
        mmu_seq = kvm->mmu_notifier_seq;
        smp_rmb();

        /* Find the memslot (if any) for this address */
        gpa = (ptel & HPTE_R_RPN) & ~(psize - 1);
        gfn = gpa >> PAGE_SHIFT;
        memslot = builtin_gfn_to_memslot(kvm, gfn);
        pa = 0;
        is_io = ~0ul;
        rmap = NULL;
        if (!(memslot && !(memslot->flags & KVM_MEMSLOT_INVALID))) {
                /* PPC970 can't do emulated MMIO */
                if (!cpu_has_feature(CPU_FTR_ARCH_206))
                        return H_PARAMETER;
                /* Emulated MMIO - mark this with key=31 */
                pteh |= HPTE_V_ABSENT;
                ptel |= HPTE_R_KEY_HI | HPTE_R_KEY_LO;
                goto do_insert;
        }

        /* Check if the requested page fits entirely in the memslot. */
        if (!slot_is_aligned(memslot, psize))
                return H_PARAMETER;
        slot_fn = gfn - memslot->base_gfn;
        rmap = &memslot->rmap[slot_fn];

        if (!kvm->arch.using_mmu_notifiers) {
                physp = kvm->arch.slot_phys[memslot->id];
                if (!physp)
                        return H_PARAMETER;
                physp += slot_fn;
                if (realmode)
                        physp = real_vmalloc_addr(physp);
                pa = *physp;
                if (!pa)
                        return H_TOO_HARD;
                is_io = pa & (HPTE_R_I | HPTE_R_W);
                pte_size = PAGE_SIZE << (pa & KVMPPC_PAGE_ORDER_MASK);
                pa &= PAGE_MASK;
        } else {
                /* Translate to host virtual address */
                hva = gfn_to_hva_memslot(memslot, gfn);

                /* Look up the Linux PTE for the backing page */
                pte_size = psize;
                pte = lookup_linux_pte(vcpu, hva, writing, &pte_size);
                if (pte_present(pte)) {
                        if (writing && !pte_write(pte))
                                /* make the actual HPTE be read-only */
                                ptel = hpte_make_readonly(ptel);
                        is_io = hpte_cache_bits(pte_val(pte));
                        pa = pte_pfn(pte) << PAGE_SHIFT;
                }
        }
        if (pte_size < psize)
                return H_PARAMETER;
        if (pa && pte_size > psize)
                pa |= gpa & (pte_size - 1);

        ptel &= ~(HPTE_R_PP0 - psize);
        ptel |= pa;

        if (pa)
                pteh |= HPTE_V_VALID;
        else
                pteh |= HPTE_V_ABSENT;

        /* Check WIMG */
        if (is_io != ~0ul && !hpte_cache_flags_ok(ptel, is_io)) {
                if (is_io)
                        return H_PARAMETER;
                /*
                 * Allow guest to map emulated device memory as
                 * uncacheable, but actually make it cacheable.
                 */
                ptel &= ~(HPTE_R_W|HPTE_R_I|HPTE_R_G);
                ptel |= HPTE_R_M;
        }

        /* Find and lock the HPTEG slot to use */
 do_insert:
        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (likely((flags & H_EXACT) == 0)) {
                pte_index &= ~7UL;
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                for (i = 0; i < 8; ++i) {
                        if ((*hpte & HPTE_V_VALID) == 0 &&
                            try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                          HPTE_V_ABSENT))
                                break;
                        hpte += 2;
                }
                if (i == 8) {
                        /*
                         * Since try_lock_hpte doesn't retry (not even stdcx.
                         * failures), it could be that there is a free slot
                         * but we transiently failed to lock it.  Try again,
                         * actually locking each slot and checking it.
                         */
                        hpte -= 16;
                        for (i = 0; i < 8; ++i) {
                                while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                        cpu_relax();
                                if (!(*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)))
                                        break;
                                *hpte &= ~HPTE_V_HVLOCK;
                                hpte += 2;
                        }
                        if (i == 8)
                                return H_PTEG_FULL;
                }
                pte_index += i;
        } else {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                if (!try_lock_hpte(hpte, HPTE_V_HVLOCK | HPTE_V_VALID |
                                   HPTE_V_ABSENT)) {
                        /* Lock the slot and check again */
                        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                                cpu_relax();
                        if (*hpte & (HPTE_V_VALID | HPTE_V_ABSENT)) {
                                *hpte &= ~HPTE_V_HVLOCK;
                                return H_PTEG_FULL;
                        }
                }
        }

        /* Save away the guest's idea of the second HPTE dword */
        rev = &kvm->arch.revmap[pte_index];
        if (realmode)
                rev = real_vmalloc_addr(rev);
        if (rev)
                rev->guest_rpte = g_ptel;

        /* Link HPTE into reverse-map chain */
        if (pteh & HPTE_V_VALID) {
                if (realmode)
                        rmap = real_vmalloc_addr(rmap);
                lock_rmap(rmap);
                /* Check for pending invalidations under the rmap chain lock */
                if (kvm->arch.using_mmu_notifiers &&
                    mmu_notifier_retry(vcpu, mmu_seq)) {
                        /* inval in progress, write a non-present HPTE */
                        pteh |= HPTE_V_ABSENT;
                        pteh &= ~HPTE_V_VALID;
                        unlock_rmap(rmap);
                } else {
                        kvmppc_add_revmap_chain(kvm, rev, rmap, pte_index,
                                                realmode);
                }
        }

        hpte[1] = ptel;

        /* Write the first HPTE dword, unlocking the HPTE and making it valid */
        eieio();
        hpte[0] = pteh;
        asm volatile("ptesync" : : : "memory");

        vcpu->arch.gpr[4] = pte_index;
        return H_SUCCESS;
}
EXPORT_SYMBOL_GPL(kvmppc_h_enter);
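
/*
 * Guest-side view (an illustrative sketch, not part of this file): a
 * PAPR guest issues H_ENTER with the flags in r4, the PTE index in r5
 * and the two HPTE doublewords in r6/r7, e.g. through the pseries
 * wrapper
 *
 *      rc = plpar_pte_enter(flags, pte_index, pteh, ptel, &slot);
 *
 * On H_SUCCESS the index actually used comes back in r4, which is why
 * this handler stores pte_index into vcpu->arch.gpr[4] above.
 */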

#define LOCK_TOKEN      (*(u32 *)(&get_paca()->lock_token))

static inline int try_lock_tlbie(unsigned int *lock)
{
        unsigned int tmp, old;
        unsigned int token = LOCK_TOKEN;

        asm volatile("1:lwarx   %1,0,%2\n"
                     "  cmpwi   cr0,%1,0\n"
                     "  bne     2f\n"
                     "  stwcx.  %3,0,%2\n"
                     "  bne-    1b\n"
                     "  isync\n"
                     "2:"
                     : "=&r" (tmp), "=&r" (old)
                     : "r" (lock), "r" (token)
                     : "cc", "memory");
        return old == 0;
}
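
/*
 * Note: this is a plain lwarx/stwcx. test-and-set that stores the paca
 * lock token, with the trailing isync providing acquire semantics.
 * Callers drop the lock with an ordinary store of 0; the ptesync they
 * execute after the tlbie sequence already orders the invalidation
 * ahead of that unlocking store.
 */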

long kvmppc_h_remove(struct kvm_vcpu *vcpu, unsigned long flags,
                     unsigned long pte_index, unsigned long avpn,
                     unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        unsigned long v, r, rb;
        struct revmap_entry *rev;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn) ||
            ((flags & H_ANDCOND) && (hpte[0] & avpn) != 0)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }

        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        v = hpte[0] & ~HPTE_V_HVLOCK;
        if (v & HPTE_V_VALID) {
                hpte[0] &= ~HPTE_V_VALID;
                rb = compute_tlbie_rb(v, hpte[1], pte_index);
                if (!(flags & H_LOCAL) && atomic_read(&kvm->online_vcpus) > 1) {
                        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                                cpu_relax();
                        asm volatile("ptesync" : : : "memory");
                        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                                     : : "r" (rb), "r" (kvm->arch.lpid));
                        asm volatile("ptesync" : : : "memory");
                        kvm->arch.tlbie_lock = 0;
                } else {
                        asm volatile("ptesync" : : : "memory");
                        asm volatile("tlbiel %0" : : "r" (rb));
                        asm volatile("ptesync" : : : "memory");
                }
                remove_revmap_chain(kvm, pte_index, v);
        }
        r = rev->guest_rpte;
        unlock_hpte(hpte, 0);

        vcpu->arch.gpr[4] = v;
        vcpu->arch.gpr[5] = r;
        return H_SUCCESS;
}
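
/*
 * Note: as PAPR specifies, H_REMOVE hands the old translation back to
 * the guest - the first doubleword as found (minus the lock bit) in r4
 * and the guest's view of the second doubleword in r5 - so the guest
 * can, for instance, inspect the final R and C bits of the page it
 * just unmapped.
 */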

long kvmppc_h_bulk_remove(struct kvm_vcpu *vcpu)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *args = &vcpu->arch.gpr[4];
        unsigned long *hp, *hptes[4], tlbrb[4];
        long int i, j, k, n, found, indexes[4];
        unsigned long flags, req, pte_index, rcbits;
        long int local = 0;
        long int ret = H_SUCCESS;
        struct revmap_entry *rev, *revs[4];

        if (atomic_read(&kvm->online_vcpus) == 1)
                local = 1;
        for (i = 0; i < 4 && ret == H_SUCCESS; ) {
                n = 0;
                for (; i < 4; ++i) {
                        j = i * 2;
                        pte_index = args[j];
                        flags = pte_index >> 56;
                        pte_index &= ((1ul << 56) - 1);
                        req = flags >> 6;
                        flags &= 3;
                        if (req == 3) {         /* no more requests */
                                i = 4;
                                break;
                        }
                        if (req != 1 || flags == 3 || pte_index >= HPT_NPTE) {
                                /* parameter error */
                                args[j] = ((0xa0 | flags) << 56) + pte_index;
                                ret = H_PARAMETER;
                                break;
                        }
                        hp = (unsigned long *)
                                (kvm->arch.hpt_virt + (pte_index << 4));
                        /* to avoid deadlock, don't spin except for first */
                        if (!try_lock_hpte(hp, HPTE_V_HVLOCK)) {
                                if (n)
                                        break;
                                while (!try_lock_hpte(hp, HPTE_V_HVLOCK))
                                        cpu_relax();
                        }
                        found = 0;
                        if (hp[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) {
                                switch (flags & 3) {
                                case 0:         /* absolute */
                                        found = 1;
                                        break;
                                case 1:         /* andcond */
                                        if (!(hp[0] & args[j + 1]))
                                                found = 1;
                                        break;
                                case 2:         /* AVPN */
                                        if ((hp[0] & ~0x7fUL) == args[j + 1])
                                                found = 1;
                                        break;
                                }
                        }
                        if (!found) {
                                hp[0] &= ~HPTE_V_HVLOCK;
                                args[j] = ((0x90 | flags) << 56) + pte_index;
                                continue;
                        }

                        args[j] = ((0x80 | flags) << 56) + pte_index;
                        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
                        /* insert R and C bits from guest PTE */
                        rcbits = rev->guest_rpte & (HPTE_R_R|HPTE_R_C);
                        args[j] |= rcbits << (56 - 5);
475
                        if (!(hp[0] & HPTE_V_VALID)) {
                                /*
                                 * Absent entry: it isn't on a revmap chain
                                 * and there is nothing to invalidate, so
                                 * just clear it, which also drops the lock
                                 * bit (a bare "continue" here would leave
                                 * the HPTE locked forever).
                                 */
                                hp[0] = 0;
                                continue;
                        }
478
                        hp[0] &= ~HPTE_V_VALID;         /* leave it locked */
                        tlbrb[n] = compute_tlbie_rb(hp[0], hp[1], pte_index);
                        indexes[n] = j;
                        hptes[n] = hp;
                        revs[n] = rev;
                        ++n;
                }

                if (!n)
                        break;

                /* Now that we've collected a batch, do the tlbies */
                if (!local) {
                        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                                cpu_relax();
                        asm volatile("ptesync" : : : "memory");
                        for (k = 0; k < n; ++k)
                                asm volatile(PPC_TLBIE(%1,%0) : :
                                             "r" (tlbrb[k]),
                                             "r" (kvm->arch.lpid));
                        asm volatile("eieio; tlbsync; ptesync" : : : "memory");
                        kvm->arch.tlbie_lock = 0;
                } else {
                        asm volatile("ptesync" : : : "memory");
                        for (k = 0; k < n; ++k)
                                asm volatile("tlbiel %0" : : "r" (tlbrb[k]));
                        asm volatile("ptesync" : : : "memory");
                }

                for (k = 0; k < n; ++k) {
                        j = indexes[k];
                        pte_index = args[j] & ((1ul << 56) - 1);
                        hp = hptes[k];
                        rev = revs[k];
                        remove_revmap_chain(kvm, pte_index, hp[0]);
                        unlock_hpte(hp, 0);
                }
        }

        return ret;
}
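
/*
 * Request packing for H_BULK_REMOVE (as decoded above): each of the
 * four request slots spans two of r4-r11.  The first register of a
 * pair carries a control byte in bits 63:56 - a 2-bit type (1 =
 * request, 3 = end of list) above 2-bit AVPN/andcond flags - over the
 * 56-bit PTE index; the second carries the AVPN or andcond value when
 * those flags are set.  On return the control byte is overwritten with
 * success (0x80), not-found (0x90) or parameter-error (0xa0) plus the
 * guest PTE's R and C bits.
 */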

long kvmppc_h_protect(struct kvm_vcpu *vcpu, unsigned long flags,
                      unsigned long pte_index, unsigned long avpn,
                      unsigned long va)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte;
        struct revmap_entry *rev;
        unsigned long v, r, rb, mask, bits;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;

        hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
        while (!try_lock_hpte(hpte, HPTE_V_HVLOCK))
                cpu_relax();
        if ((hpte[0] & (HPTE_V_ABSENT | HPTE_V_VALID)) == 0 ||
            ((flags & H_AVPN) && (hpte[0] & ~0x7fUL) != avpn)) {
                hpte[0] &= ~HPTE_V_HVLOCK;
                return H_NOT_FOUND;
        }

        if (atomic_read(&kvm->online_vcpus) == 1)
                flags |= H_LOCAL;
        v = hpte[0];
        bits = (flags << 55) & HPTE_R_PP0;
        bits |= (flags << 48) & HPTE_R_KEY_HI;
        bits |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

        /* Update guest view of 2nd HPTE dword */
        mask = HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N |
                HPTE_R_KEY_HI | HPTE_R_KEY_LO;
        rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        if (rev) {
                r = (rev->guest_rpte & ~mask) | bits;
                rev->guest_rpte = r;
        }
        r = (hpte[1] & ~mask) | bits;

        /* Update HPTE */
        if (v & HPTE_V_VALID) {
                rb = compute_tlbie_rb(v, r, pte_index);
                hpte[0] = v & ~HPTE_V_VALID;
                if (!(flags & H_LOCAL)) {
                        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                                cpu_relax();
                        asm volatile("ptesync" : : : "memory");
                        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                                     : : "r" (rb), "r" (kvm->arch.lpid));
                        asm volatile("ptesync" : : : "memory");
                        kvm->arch.tlbie_lock = 0;
                } else {
                        asm volatile("ptesync" : : : "memory");
                        asm volatile("tlbiel %0" : : "r" (rb));
                        asm volatile("ptesync" : : : "memory");
                }
        }
        hpte[1] = r;
        eieio();
        hpte[0] = v & ~HPTE_V_HVLOCK;
        asm volatile("ptesync" : : : "memory");
        return H_SUCCESS;
}
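
/*
 * Note: H_PROTECT only touches the protection-related fields (pp0, pp,
 * n and the storage keys), so the translation itself is preserved: the
 * HPTE is made temporarily invalid while the stale translation is
 * flushed, then rewritten with the updated second doubleword.
 */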

long kvmppc_h_read(struct kvm_vcpu *vcpu, unsigned long flags,
                   unsigned long pte_index)
{
        struct kvm *kvm = vcpu->kvm;
        unsigned long *hpte, v, r;
        int i, n = 1;
        struct revmap_entry *rev = NULL;

        if (pte_index >= HPT_NPTE)
                return H_PARAMETER;
        if (flags & H_READ_4) {
                pte_index &= ~3;
                n = 4;
        }
        if (flags & H_R_XLATE)
                rev = real_vmalloc_addr(&kvm->arch.revmap[pte_index]);
        for (i = 0; i < n; ++i, ++pte_index) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (pte_index << 4));
                v = hpte[0] & ~HPTE_V_HVLOCK;
                r = hpte[1];
                if (v & HPTE_V_ABSENT) {
                        v &= ~HPTE_V_ABSENT;
                        v |= HPTE_V_VALID;
                }
                if (v & HPTE_V_VALID) {
                        if (rev)
                                r = rev[i].guest_rpte;
                        else
                                r = hpte[1] | HPTE_R_RPN;
                }
                vcpu->arch.gpr[4 + i * 2] = v;
                vcpu->arch.gpr[5 + i * 2] = r;
        }
        return H_SUCCESS;
}
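
/*
 * Note: entries we marked HPTE_V_ABSENT are reported to the guest as
 * valid, and with H_R_XLATE the guest's saved view of the second
 * doubleword is returned instead of the real one, so the guest never
 * observes the host real addresses (or absence) that kvmppc_h_enter
 * substituted.
 */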

void kvmppc_invalidate_hpte(struct kvm *kvm, unsigned long *hptep,
                        unsigned long pte_index)
{
        unsigned long rb;

        hptep[0] &= ~HPTE_V_VALID;
        rb = compute_tlbie_rb(hptep[0], hptep[1], pte_index);
        while (!try_lock_tlbie(&kvm->arch.tlbie_lock))
                cpu_relax();
        asm volatile("ptesync" : : : "memory");
        asm volatile(PPC_TLBIE(%1,%0)"; eieio; tlbsync"
                     : : "r" (rb), "r" (kvm->arch.lpid));
        asm volatile("ptesync" : : : "memory");
        kvm->arch.tlbie_lock = 0;
}
EXPORT_SYMBOL_GPL(kvmppc_invalidate_hpte);

static int slb_base_page_shift[4] = {
        24,     /* 16M */
        16,     /* 64k */
        34,     /* 16G */
        20,     /* 1M, unsupported */
};

long kvmppc_hv_find_lock_hpte(struct kvm *kvm, gva_t eaddr, unsigned long slb_v,
                              unsigned long valid)
{
        unsigned int i;
        unsigned int pshift;
        unsigned long somask;
        unsigned long vsid, hash;
        unsigned long avpn;
        unsigned long *hpte;
        unsigned long mask, val;
        unsigned long v, r;

        /* Get page shift, work out hash and AVPN etc. */
        mask = SLB_VSID_B | HPTE_V_AVPN | HPTE_V_SECONDARY;
        val = 0;
        pshift = 12;
        if (slb_v & SLB_VSID_L) {
                mask |= HPTE_V_LARGE;
                val |= HPTE_V_LARGE;
                pshift = slb_base_page_shift[(slb_v & SLB_VSID_LP) >> 4];
        }
        if (slb_v & SLB_VSID_B_1T) {
                somask = (1UL << 40) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT_1T;
                vsid ^= vsid << 25;
        } else {
                somask = (1UL << 28) - 1;
                vsid = (slb_v & ~SLB_VSID_B) >> SLB_VSID_SHIFT;
        }
        hash = (vsid ^ ((eaddr & somask) >> pshift)) & HPT_HASH_MASK;
        avpn = slb_v & ~(somask >> 16); /* also includes B */
        avpn |= (eaddr & somask) >> 16;

        if (pshift >= 24)
                avpn &= ~((1UL << (pshift - 16)) - 1);
        else
                avpn &= ~0x7fUL;
        val |= avpn;

        for (;;) {
                hpte = (unsigned long *)(kvm->arch.hpt_virt + (hash << 7));

                for (i = 0; i < 16; i += 2) {
                        /* Read the PTE racily */
                        v = hpte[i] & ~HPTE_V_HVLOCK;

                        /* Check valid/absent, hash, segment size and AVPN */
                        if (!(v & valid) || (v & mask) != val)
                                continue;

                        /* Lock the PTE and read it under the lock */
                        while (!try_lock_hpte(&hpte[i], HPTE_V_HVLOCK))
                                cpu_relax();
                        v = hpte[i] & ~HPTE_V_HVLOCK;
                        r = hpte[i+1];

                        /*
                         * Check the HPTE again, including large page size.
                         * Since we don't currently allow any MPSS (mixed
                         * page-size segment) page sizes, it is sufficient
                         * to check against the actual page size.
                         */
                        if ((v & valid) && (v & mask) == val &&
                            hpte_page_size(v, r) == (1ul << pshift))
                                /* Return with the HPTE still locked */
                                return (hash << 3) + (i >> 1);

                        /* Unlock and move on */
                        hpte[i] = v;
                }

                if (val & HPTE_V_SECONDARY)
                        break;
                val |= HPTE_V_SECONDARY;
                hash = hash ^ HPT_HASH_MASK;
        }
        return -1;
}
EXPORT_SYMBOL(kvmppc_hv_find_lock_hpte);
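
/*
 * Worked example (illustrative numbers only): for a 256MB segment with
 * 4k pages, pshift = 12 and somask = (1UL << 28) - 1, so the primary
 * hash is (vsid ^ ((eaddr & 0xfffffff) >> 12)) & HPT_HASH_MASK.  Each
 * hash bucket is one 128-byte HPTEG of eight 16-byte HPTEs (hence
 * hash << 7 and the i += 2 walk over doublewords), and the secondary
 * hash is the ones-complement of the primary within HPT_HASH_MASK.
 */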

/*
 * Called in real mode to check whether an HPTE not found fault
 * is due to accessing a paged-out page or an emulated MMIO page,
 * or if a protection fault is due to accessing a page that the
 * guest wanted read/write access to but which we made read-only.
 * Returns a possibly modified status (DSISR) value if the fault
 * should be passed on to the guest,
 * -1 to pass the fault up to host kernel mode code, -2 to do that
 * and also load the instruction word (for MMIO emulation),
 * or 0 if we should make the guest retry the access.
 */
long kvmppc_hpte_hv_fault(struct kvm_vcpu *vcpu, unsigned long addr,
                          unsigned long slb_v, unsigned int status, bool data)
{
        struct kvm *kvm = vcpu->kvm;
        long int index;
        unsigned long v, r, gr;
        unsigned long *hpte;
        unsigned long valid;
        struct revmap_entry *rev;
        unsigned long pp, key;

        /* For protection fault, expect to find a valid HPTE */
        valid = HPTE_V_VALID;
        if (status & DSISR_NOHPTE)
                valid |= HPTE_V_ABSENT;

        index = kvmppc_hv_find_lock_hpte(kvm, addr, slb_v, valid);
        if (index < 0) {
                if (status & DSISR_NOHPTE)
                        return status;  /* there really was no HPTE */
                return 0;               /* for prot fault, HPTE disappeared */
        }
        hpte = (unsigned long *)(kvm->arch.hpt_virt + (index << 4));
        v = hpte[0] & ~HPTE_V_HVLOCK;
        r = hpte[1];
        rev = real_vmalloc_addr(&kvm->arch.revmap[index]);
        gr = rev->guest_rpte;

        unlock_hpte(hpte, v);

        /* For not found, if the HPTE is valid by now, retry the instruction */
        if ((status & DSISR_NOHPTE) && (v & HPTE_V_VALID))
                return 0;

        /* Check access permissions to the page */
        pp = gr & (HPTE_R_PP0 | HPTE_R_PP);
        key = (vcpu->arch.shregs.msr & MSR_PR) ? SLB_VSID_KP : SLB_VSID_KS;
        status &= ~DSISR_NOHPTE;        /* DSISR_NOHPTE == SRR1_ISI_NOPT */
        if (!data) {
                if (gr & (HPTE_R_N | HPTE_R_G))
                        return status | SRR1_ISI_N_OR_G;
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | SRR1_ISI_PROT;
        } else if (status & DSISR_ISSTORE) {
                /* check write permission */
                if (!hpte_write_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        } else {
                if (!hpte_read_permission(pp, slb_v & key))
                        return status | DSISR_PROTFAULT;
        }

        /* Check storage key, if applicable */
        if (data && (vcpu->arch.shregs.msr & MSR_DR)) {
                unsigned int perm = hpte_get_skey_perm(gr, vcpu->arch.amr);
                if (status & DSISR_ISSTORE)
                        perm >>= 1;
                if (perm & 1)
                        return status | DSISR_KEYFAULT;
        }

        /* Save HPTE info for virtual-mode handler */
        vcpu->arch.pgfault_addr = addr;
        vcpu->arch.pgfault_index = index;
        vcpu->arch.pgfault_hpte[0] = v;
        vcpu->arch.pgfault_hpte[1] = r;

        /* Check the storage key to see if it is possibly emulated MMIO */
        if (data && (vcpu->arch.shregs.msr & MSR_IR) &&
            (r & (HPTE_R_KEY_HI | HPTE_R_KEY_LO)) ==
            (HPTE_R_KEY_HI | HPTE_R_KEY_LO))
                return -2;      /* MMIO emulation - load instr word */

        return -1;      /* send fault up to host kernel mode */
}