/*
 * Copyright (C) 2009 SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 */

#include <linux/kvm_host.h>
#include <linux/hash.h>

#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu-hash64.h>
#include <asm/machdep.h>
#include <asm/mmu_context.h>
#include <asm/hw_irq.h>

#define PTE_SIZE 12
#define VSID_ALL 0

/* #define DEBUG_MMU */
/* #define DEBUG_SLB */

#ifdef DEBUG_MMU
#define dprintk_mmu(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_mmu(a, ...) do { } while (0)
#endif

#ifdef DEBUG_SLB
#define dprintk_slb(a, ...) printk(KERN_INFO a, __VA_ARGS__)
#else
#define dprintk_slb(a, ...) do { } while (0)
#endif

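/*
 * Tear down one shadow HPTE: invalidate the host hash table entry and
 * release the backing host page, marking it dirty if the guest mapping
 * was writable.
 */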
static void invalidate_pte(struct hpte_cache *pte)
{
	dprintk_mmu("KVM: Flushing SPT: 0x%lx (0x%llx) -> 0x%llx\n",
		    pte->pte.eaddr, pte->pte.vpage, pte->host_va);

	ppc_md.hpte_invalidate(pte->slot, pte->host_va,
			       MMU_PAGE_4K, MMU_SEGSIZE_256M,
			       false);
	pte->host_va = 0;

	if (pte->pte.may_write)
		kvm_release_pfn_dirty(pte->pfn);
	else
		kvm_release_pfn_clean(pte->pfn);
}

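/*
 * Flush every shadow PTE whose effective address matches guest_ea under
 * ea_mask. An ea_mask of 0 matches all entries and resets the cache.
 */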
void kvmppc_mmu_pte_flush(struct kvm_vcpu *vcpu, ulong guest_ea, ulong ea_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow PTEs: 0x%lx & 0x%lx\n",
		    vcpu->arch.hpte_cache_offset, guest_ea, ea_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_ea &= ea_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.eaddr & ea_mask) == guest_ea)
			invalidate_pte(pte);
	}

	/* Doing a complete flush -> start from scratch */
	if (!ea_mask)
		vcpu->arch.hpte_cache_offset = 0;
}

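/*
 * Flush every shadow PTE whose guest virtual page matches guest_vp
 * under vp_mask.
 */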
void kvmppc_mmu_pte_vflush(struct kvm_vcpu *vcpu, u64 guest_vp, u64 vp_mask)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow vPTEs: 0x%llx & 0x%llx\n",
		    vcpu->arch.hpte_cache_offset, guest_vp, vp_mask);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	guest_vp &= vp_mask;
	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.vpage & vp_mask) == guest_vp)
			invalidate_pte(pte);
	}
}

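/*
 * Flush every shadow PTE backed by a guest physical address in
 * [pa_start, pa_end).
 */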
void kvmppc_mmu_pte_pflush(struct kvm_vcpu *vcpu, ulong pa_start, ulong pa_end)
{
	int i;

	dprintk_mmu("KVM: Flushing %d Shadow pPTEs: 0x%lx - 0x%lx\n",
		    vcpu->arch.hpte_cache_offset, pa_start, pa_end);
	BUG_ON(vcpu->arch.hpte_cache_offset > HPTEG_CACHE_NUM);

	for (i = 0; i < vcpu->arch.hpte_cache_offset; i++) {
		struct hpte_cache *pte;

		pte = &vcpu->arch.hpte_cache[i];
		if (!pte->host_va)
			continue;

		if ((pte->pte.raddr >= pa_start) &&
		    (pte->pte.raddr < pa_end))
			invalidate_pte(pte);
	}
}

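/*
 * Hand out the next free slot in the shadow PTE cache, flushing the
 * whole cache first if it is full.
 */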
static int kvmppc_mmu_hpte_cache_next(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.hpte_cache_offset == HPTEG_CACHE_NUM)
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return vcpu->arch.hpte_cache_offset++;
}

/* We keep 512 gvsid->hvsid entries, mapping the guest ones to the array using
 * a hash, so we don't waste cycles on looping */
static u16 kvmppc_sid_hash(struct kvm_vcpu *vcpu, u64 gvsid)
{
	return hash_64(gvsid, SID_MAP_BITS);
}

static struct kvmppc_sid_map *find_sid_vsid(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	u16 sid_map_mask;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	map = &to_book3s(vcpu)->sid_map[sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_slb("SLB: Searching: 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	map = &to_book3s(vcpu)->sid_map[SID_MAP_MASK - sid_map_mask];
	if (map->guest_vsid == gvsid) {
		dprintk_slb("SLB: Searching 0x%llx -> 0x%llx\n",
			    gvsid, map->host_vsid);
		return map;
	}

	dprintk_slb("SLB: Searching %d/%d: 0x%llx -> not found\n",
		    sid_map_mask, SID_MAP_MASK - sid_map_mask, gvsid);
	return NULL;
}

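/*
 * Map a guest page into the host hash page table: resolve the guest
 * physical address to a host page, look up the host VSID for the
 * segment (mapping the segment first if necessary), then insert an
 * HPTE, retrying in the secondary hash group if the primary is full.
 */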
int kvmppc_mmu_map_page(struct kvm_vcpu *vcpu, struct kvmppc_pte *orig_pte)
{
	pfn_t hpaddr;
	ulong hash, hpteg, va;
	u64 vsid;
	int ret;
	int rflags = 0x192;
	int vflags = 0;
	int attempt = 0;
	struct kvmppc_sid_map *map;

	/* Get host physical address for gpa */
	hpaddr = gfn_to_pfn(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);
	/* gfn_to_pfn() returns a pfn, so check it as one */
	if (is_error_pfn(hpaddr)) {
		printk(KERN_INFO "Couldn't get guest page for gfn %lx!\n",
		       orig_pte->eaddr);
		return -EINVAL;
	}
	hpaddr <<= PAGE_SHIFT;
#if PAGE_SHIFT == 12
#elif PAGE_SHIFT == 16
	hpaddr |= orig_pte->raddr & 0xf000;
#else
#error Unknown page size
#endif

	/* and write the mapping ea -> hpa into the pt */
	vcpu->arch.mmu.esid_to_vsid(vcpu, orig_pte->eaddr >> SID_SHIFT, &vsid);
	map = find_sid_vsid(vcpu, vsid);
	if (!map) {
		ret = kvmppc_mmu_map_segment(vcpu, orig_pte->eaddr);
		WARN_ON(ret < 0);
		map = find_sid_vsid(vcpu, vsid);
	}
	if (!map) {
		printk(KERN_ERR "KVM: Segment map for 0x%llx (0x%lx) failed\n",
		       vsid, orig_pte->eaddr);
		WARN_ON(true);
		return -EINVAL;
	}

	vsid = map->host_vsid;
	va = hpt_va(orig_pte->eaddr, vsid, MMU_SEGSIZE_256M);

	if (!orig_pte->may_write)
		rflags |= HPTE_R_PP;
	else
		mark_page_dirty(vcpu->kvm, orig_pte->raddr >> PAGE_SHIFT);

	if (!orig_pte->may_execute)
		rflags |= HPTE_R_N;

	hash = hpt_hash(va, PTE_SIZE, MMU_SEGSIZE_256M);

map_again:
	hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);

	/* In case we tried normal mapping already, let's nuke old entries */
	if (attempt > 1)
		if (ppc_md.hpte_remove(hpteg) < 0)
			return -1;

	ret = ppc_md.hpte_insert(hpteg, va, hpaddr, rflags, vflags,
				 MMU_PAGE_4K, MMU_SEGSIZE_256M);

	if (ret < 0) {
		/* If we couldn't map a primary PTE, try a secondary */
		hash = ~hash;
		vflags ^= HPTE_V_SECONDARY;
		attempt++;
		goto map_again;
	} else {
		int hpte_id = kvmppc_mmu_hpte_cache_next(vcpu);
		struct hpte_cache *pte = &vcpu->arch.hpte_cache[hpte_id];

		dprintk_mmu("KVM: %c%c Map 0x%lx: [%lx] 0x%lx (0x%llx) -> %lx\n",
			    ((rflags & HPTE_R_PP) == 3) ? '-' : 'w',
			    (rflags & HPTE_R_N) ? '-' : 'x',
			    orig_pte->eaddr, hpteg, va, orig_pte->vpage, hpaddr);

		/* The ppc_md code may give us a secondary entry even though we
		   asked for a primary. Fix up. */
		if ((ret & _PTEIDX_SECONDARY) && !(vflags & HPTE_V_SECONDARY)) {
			hash = ~hash;
			hpteg = ((hash & htab_hash_mask) * HPTES_PER_GROUP);
		}

		pte->slot = hpteg + (ret & 7);
		pte->host_va = va;
		pte->pte = *orig_pte;
		pte->pfn = hpaddr >> PAGE_SHIFT;
	}

	return 0;
}

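/*
 * Allocate a fresh host VSID for a guest VSID. When the host VSID range
 * is exhausted, all segment mappings and shadow PTEs are flushed and
 * allocation starts over from vsid_first.
 */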
static struct kvmppc_sid_map *create_sid_map(struct kvm_vcpu *vcpu, u64 gvsid)
{
	struct kvmppc_sid_map *map;
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u16 sid_map_mask;
	static int backwards_map = 0;

	if (vcpu->arch.msr & MSR_PR)
		gvsid |= VSID_PR;

	/* Different guest VSIDs can collide in the same hash slot, so
	   alternate between the slot and its mirror to keep both usable */

	sid_map_mask = kvmppc_sid_hash(vcpu, gvsid);
	if (backwards_map)
		sid_map_mask = SID_MAP_MASK - sid_map_mask;

	map = &to_book3s(vcpu)->sid_map[sid_map_mask];

	/* Make sure we're taking the other map next time */
	backwards_map = !backwards_map;

	/* Uh-oh ... out of mappings. Let's flush! */
	if (vcpu_book3s->vsid_next == vcpu_book3s->vsid_max) {
		vcpu_book3s->vsid_next = vcpu_book3s->vsid_first;
		memset(vcpu_book3s->sid_map, 0,
		       sizeof(struct kvmppc_sid_map) * SID_MAP_NUM);
		kvmppc_mmu_pte_flush(vcpu, 0, 0);
		kvmppc_mmu_flush_segments(vcpu);
	}
	map->host_vsid = vcpu_book3s->vsid_next++;

	map->guest_vsid = gvsid;
	map->valid = true;

	dprintk_slb("SLB: New mapping at %d: 0x%llx -> 0x%llx\n",
		    sid_map_mask, gvsid, map->host_vsid);

	return map;
}

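/*
 * Pick a shadow SLB slot for the given ESID: reuse a matching or
 * previously invalidated entry if possible, otherwise grow the shadow
 * SLB, flushing it first when it is full. Slot 0 is reserved.
 */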
static int kvmppc_mmu_next_segment(struct kvm_vcpu *vcpu, ulong esid)
{
	int i;
	int max_slb_size = 64;
	int found_inval = -1;
	int r;

	if (!to_svcpu(vcpu)->slb_max)
		to_svcpu(vcpu)->slb_max = 1;

	/* Are we overwriting? */
	for (i = 1; i < to_svcpu(vcpu)->slb_max; i++) {
		if (!(to_svcpu(vcpu)->slb[i].esid & SLB_ESID_V))
			found_inval = i;
		else if ((to_svcpu(vcpu)->slb[i].esid & ESID_MASK) == esid)
			return i;
	}

	/* Found a spare entry that was invalidated before */
	if (found_inval > 0)
		return found_inval;

	/* No spare invalid entry, so create one */

	if (mmu_slb_size < 64)
		max_slb_size = mmu_slb_size;

	/* Overflowing -> purge */
	if ((to_svcpu(vcpu)->slb_max) == max_slb_size)
		kvmppc_mmu_flush_segments(vcpu);

	r = to_svcpu(vcpu)->slb_max;
	to_svcpu(vcpu)->slb_max++;

	return r;
}

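/*
 * Map a guest segment into the shadow SLB: translate the guest ESID to
 * a guest VSID, find or create the corresponding host VSID, and program
 * the shadow SLB entry.
 */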
int kvmppc_mmu_map_segment(struct kvm_vcpu *vcpu, ulong eaddr)
{
	u64 esid = eaddr >> SID_SHIFT;
	u64 slb_esid = (eaddr & ESID_MASK) | SLB_ESID_V;
	u64 slb_vsid = SLB_VSID_USER;
	u64 gvsid;
	int slb_index;
	struct kvmppc_sid_map *map;

	slb_index = kvmppc_mmu_next_segment(vcpu, eaddr & ESID_MASK);

	if (vcpu->arch.mmu.esid_to_vsid(vcpu, esid, &gvsid)) {
		/* Invalidate an entry */
		to_svcpu(vcpu)->slb[slb_index].esid = 0;
		return -ENOENT;
	}

	map = find_sid_vsid(vcpu, gvsid);
	if (!map)
		map = create_sid_map(vcpu, gvsid);

	map->guest_esid = esid;

	slb_vsid |= (map->host_vsid << 12);
	slb_vsid &= ~SLB_VSID_KP;
	slb_esid |= slb_index;

	to_svcpu(vcpu)->slb[slb_index].esid = slb_esid;
	to_svcpu(vcpu)->slb[slb_index].vsid = slb_vsid;

	dprintk_slb("slbmte %#llx, %#llx\n", slb_vsid, slb_esid);

	return 0;
}

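/* Reset the shadow SLB, keeping only the (invalidated) reserved slot 0. */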
void kvmppc_mmu_flush_segments(struct kvm_vcpu *vcpu)
{
	to_svcpu(vcpu)->slb_max = 1;
	to_svcpu(vcpu)->slb[0].esid = 0;
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	kvmppc_mmu_pte_flush(vcpu, 0, 0);
	__destroy_context(to_book3s(vcpu)->context_id);
}

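/*
 * Grab a free host MMU context for this vcpu and carve out the host
 * VSID range that context provides for guest segment mappings.
 */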
int kvmppc_mmu_init(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int err;

	err = __init_new_context();
	if (err < 0)
		return -1;
	vcpu3s->context_id = err;

	vcpu3s->vsid_max = ((vcpu3s->context_id + 1) << USER_ESID_BITS) - 1;
	vcpu3s->vsid_first = vcpu3s->context_id << USER_ESID_BITS;
	vcpu3s->vsid_next = vcpu3s->vsid_first;

	return 0;
}