/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *     Alexander Graf <agraf@suse.de>
 *     Kevin Wolf <mail@kevin-wolf.de>
 *     Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu_put(svcpu);
#endif
        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
                          struct kvm_vcpu *vcpu)
{
        svcpu->gpr[0] = vcpu->arch.gpr[0];
        svcpu->gpr[1] = vcpu->arch.gpr[1];
        svcpu->gpr[2] = vcpu->arch.gpr[2];
        svcpu->gpr[3] = vcpu->arch.gpr[3];
        svcpu->gpr[4] = vcpu->arch.gpr[4];
        svcpu->gpr[5] = vcpu->arch.gpr[5];
        svcpu->gpr[6] = vcpu->arch.gpr[6];
        svcpu->gpr[7] = vcpu->arch.gpr[7];
        svcpu->gpr[8] = vcpu->arch.gpr[8];
        svcpu->gpr[9] = vcpu->arch.gpr[9];
        svcpu->gpr[10] = vcpu->arch.gpr[10];
        svcpu->gpr[11] = vcpu->arch.gpr[11];
        svcpu->gpr[12] = vcpu->arch.gpr[12];
        svcpu->gpr[13] = vcpu->arch.gpr[13];
        svcpu->cr = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.xer;
        svcpu->ctr = vcpu->arch.ctr;
        svcpu->lr = vcpu->arch.lr;
        svcpu->pc = vcpu->arch.pc;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
                            struct kvmppc_book3s_shadow_vcpu *svcpu)
{
        vcpu->arch.gpr[0] = svcpu->gpr[0];
        vcpu->arch.gpr[1] = svcpu->gpr[1];
        vcpu->arch.gpr[2] = svcpu->gpr[2];
        vcpu->arch.gpr[3] = svcpu->gpr[3];
        vcpu->arch.gpr[4] = svcpu->gpr[4];
        vcpu->arch.gpr[5] = svcpu->gpr[5];
        vcpu->arch.gpr[6] = svcpu->gpr[6];
        vcpu->arch.gpr[7] = svcpu->gpr[7];
        vcpu->arch.gpr[8] = svcpu->gpr[8];
        vcpu->arch.gpr[9] = svcpu->gpr[9];
        vcpu->arch.gpr[10] = svcpu->gpr[10];
        vcpu->arch.gpr[11] = svcpu->gpr[11];
        vcpu->arch.gpr[12] = svcpu->gpr[12];
        vcpu->arch.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr = svcpu->cr;
        vcpu->arch.xer = svcpu->xer;
        vcpu->arch.ctr = svcpu->ctr;
        vcpu->arch.lr = svcpu->lr;
        vcpu->arch.pc = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst = svcpu->last_inst;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/

int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
        trace_kvm_unmap_hva(hva);

        /*
         * Flush all shadow tlb entries everywhere. This is slow, but
         * we are 100% sure that we catch the to-be-unmapped page.
         */
        kvm_flush_remote_tlbs(kvm);

        return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
        /* kvm_unmap_hva flushes everything anyway */
        kvm_unmap_hva(kvm, start);

        return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        kvm_unmap_hva(kvm, hva);
}

/*****************************************/

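/*
 * The guest runs in user mode with a "shadow" MSR actually loaded into
 * the hardware: the bits the guest may control directly (FE0/FE1/SF/SE/BE
 * plus whichever of FP/VEC/VSX it currently owns), merged with the bits
 * the host requires for problem-state execution (ME, RI, IR, DR, PR, EE).
 */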
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong smsr = vcpu->arch.shared->msr;

        /* Guest MSR values */
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers the guest has reserved */
        smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
        vcpu->arch.shadow_msr = smsr;
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

        msr &= to_book3s(vcpu)->msr_mask;
        vcpu->arch.shared->msr = msr;
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        vcpu->arch.shared->msr = msr;
                }
        }

        if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
            (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around that we need to flush. Typically the 32-bit
         * magic page will be instantiated when calling into RTAS. Note: we
         * assume that such a transition only happens while in kernel mode,
         * i.e., we never transition from user 32-bit to kernel 64-bit with
         * a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are at hypervisor level on a 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set, so force-disable them
           and hope nobody really needs them in a VM on Cell. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32-bit Book3S always has 32-byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz so we can emulate the 32-byte dcbz length.
 *
 * The Book3s_64 designers also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
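        /*
         * Clearing the 0x8 bit of the instruction word (the & 0xfffffff7
         * below) turns a matching dcbz encoding into a reserved instruction,
         * so executing it raises a program interrupt. kvmppc_handle_exit()
         * recognizes this patched encoding and sends it to the emulator,
         * which performs the dcbz with 32-byte semantics.
         */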
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((page[i] & 0xff0007ff) == INS_DCBZ)
                        page[i] &= 0xfffffff7;

        kunmap_atomic(page);
        put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(vcpu->arch.shared->msr & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        if (unlikely(mp_pa) &&
            unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
                return 1;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte;
        bool is_mmio = false;
        bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
        bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
        }

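        /*
         * Tag the virtual page number with a VSID so that real-mode,
         * IR-only and DR-only translations live in distinct shadow
         * address spaces (VSID_REAL, VSID_REAL_IR, VSID_REAL_DR) and
         * can't collide in the shadow MMU.
         */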
        switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT) {
                /* Page not found in guest PTE entries */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EPERM) {
                /* Storage protection */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
                vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
                vcpu->arch.shared->msr |=
                        vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
                kvmppc_book3s_queue_irqprio(vcpu, vec);
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (!is_mmio &&
                   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
                /* The guest's PTE is not mapped yet. Map on the host */
                kvmppc_mmu_map_page(vcpu, &pte);
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                        (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if ( r == RESUME_HOST_NV )
                        r = RESUME_HOST;
        }

        return r;
}

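/*
 * On CPUs with VSX, thread_struct stores each FP register in the first
 * doubleword of a two-doubleword VSX slot (TS_FPRWIDTH == 2), so FP
 * register i lives at fpr[i * TS_FPRWIDTH] and the corresponding VSX
 * low half at the following index.
 */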
static inline int get_fpr_index(int i)
{
        return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = (u64 *)t->fpr;
        int i;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fpr[].
                 */
                if (current->thread.regs->msr & MSR_FP)
                        giveup_fpu(current);
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

                vcpu->arch.fpscr = t->fpscr.val;

#ifdef CONFIG_VSX
                if (cpu_has_feature(CPU_FTR_VSX))
                        for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                                vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
                vcpu->arch.vscr = t->vscr;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
        ulong srr0 = kvmppc_get_pc(vcpu);
        u32 last_inst = kvmppc_get_last_inst(vcpu);
        int ret;

        ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
        if (ret == -ENOENT) {
                ulong msr = vcpu->arch.shared->msr;

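                /*
                 * Synthesize an instruction storage interrupt: in IBM bit
                 * numbering (bit 0 = MSB of the 64-bit MSR image), bit 33
                 * is 0x40000000, the "no HPTE found" ISI status; bits 34-36
                 * and 42-47 are the remaining ISI status bits, which we
                 * clear.
                 */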
                msr = kvmppc_set_field(msr, 33, 33, 1);
                msr = kvmppc_set_field(msr, 34, 36, 0);
                vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
                kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
                return EMULATE_AGAIN;
        }

        return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
        /* Need to do paired single emulation? */
        if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
                return EMULATE_DONE;

        /* Read out the instruction */
        if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
                /* Need to emulate */
                return EMULATE_FAIL;

        return EMULATE_AGAIN;
}

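/*
 * vcpu->arch.guest_owned_ext tracks which facilities' register state
 * currently lives in the host thread_struct on the guest's behalf:
 * kvmppc_handle_ext() loads guest state into the CPU and sets the bits,
 * kvmppc_giveup_ext() flushes it back into the vcpu and clears them.
 */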
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;
        u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
        u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
        u64 *thread_fpr = (u64 *)t->fpr;
        int i;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(vcpu->arch.shared->msr & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
                        thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
#ifdef CONFIG_VSX
                for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
                        thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
                t->fpscr.val = vcpu->arch.fpscr;
                t->fpexc_mode = 0;
                kvmppc_load_up_fpu();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
                t->vscr = vcpu->arch.vscr;
                t->vrsave = -1;
                kvmppc_load_up_altivec();
#endif
        }

        current->thread.regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP)
                kvmppc_load_up_fpu();
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC)
                kvmppc_load_up_altivec();
#endif
        current->thread.regs->msr |= lost_ext;
}

int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        kvm_guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We mark segments as unused when invalidating them, so
                 * treat the respective fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to
                         *     flush&patch the page, so we can't use the NX
                         *     bit inside the guest. Let's cross our fingers
                         *     that no guest that needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We mark segments as unused when invalidating them, so
                 * treat the respective fault as a segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* The only case we need to handle is missing shadow PTEs */
                if (fault_dsisr & DSISR_NOHPTE) {
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                } else {
                        vcpu->arch.shared->dar = dar;
                        vcpu->arch.shared->dsisr = fault_dsisr;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PERFMON:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
        {
                enum emulation_result er;
                ulong flags;

program_interrupt:
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

                if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
                        printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
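                        /*
                         * A program interrupt from guest userspace is
                         * reflected straight back, unless the faulting
                         * instruction is a dcbz we previously patched
                         * (see kvmppc_patch_dcbz()), which must fall
                         * through to the emulator below.
                         */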
                        if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
                            (INS_DCBZ & 0xfffffff7)) {
                                kvmppc_core_queue_program(vcpu, flags);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                vcpu->stat.emulated_inst_exits++;
                er = kvmppc_emulate_instruction(run, vcpu);
                switch (er) {
                case EMULATE_DONE:
                        r = RESUME_GUEST_NV;
                        break;
                case EMULATE_AGAIN:
                        r = RESUME_GUEST;
                        break;
                case EMULATE_FAIL:
                        printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                               __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
                        kvmppc_core_queue_program(vcpu, flags);
                        r = RESUME_GUEST;
                        break;
                case EMULATE_DO_MMIO:
                        run->exit_reason = KVM_EXIT_MMIO;
                        r = RESUME_HOST_NV;
                        break;
                case EMULATE_EXIT_USER:
                        r = RESUME_HOST_NV;
                        break;
                default:
                        BUG();
                }
                break;
        }
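        /*
         * 0x44000022 is the encoding of "sc 1", the hypercall form of the
         * system call instruction; an sc 1 from guest kernel mode is a
         * PAPR hypercall for us to handle here or forward to userspace.
         */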
        case BOOK3S_INTERRUPT_SYSCALL:
                if (vcpu->arch.papr_enabled &&
                    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
                    !(vcpu->arch.shared->msr & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;

                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
                case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
                case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
                }

                switch (kvmppc_check_ext(vcpu, exit_nr)) {
                case EMULATE_DONE:
                        /* everything ok - let's enable the ext */
                        r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                        break;
                case EMULATE_FAIL:
                        /* we need to emulate this instruction */
                        goto program_interrupt;
                        break;
                default:
                        /* nothing to worry about - go again */
                        break;
                }
                break;
        }
        case BOOK3S_INTERRUPT_ALIGNMENT:
                if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
                        vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
                                kvmppc_get_last_inst(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_MACHINE_CHECK:
        case BOOK3S_INTERRUPT_TRACE:
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                r = RESUME_GUEST;
                break;
        default:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                /* Ugh - bork here! What did we get? */
                printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
                        exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
                r = RESUME_HOST;
                BUG();
                break;
        }
        }

        if (!(r & RESUME_HOST)) {
                /* To avoid clobbering exit_reason, only check for signals if
                 * we aren't already exiting to userspace for some other
                 * reason. */

                /*
                 * Interrupts could be timers for the guest that we have to
                 * inject again, so let's postpone them until we're in the
                 * guest; if we really did time things that badly, we'll just
                 * exit again due to a host external interrupt.
                 */
                local_irq_disable();
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0) {
                        local_irq_enable();
                        r = s;
                } else {
                        kvmppc_fix_ee_before_entry();
                }
                kvmppc_handle_lost_ext(vcpu);
        }

        trace_kvm_book3s_reenter(r, vcpu);

        return r;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        sregs->pvr = vcpu->arch.pvr;

        sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
                        sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
                }
        } else {
                for (i = 0; i < 16; i++)
                        sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

                for (i = 0; i < 8; i++) {
                        sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
                        sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
                }
        }

        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
        int i;

        kvmppc_set_pvr(vcpu, sregs->pvr);

        vcpu3s->sdr1 = sregs->u.s.sdr1;
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
                for (i = 0; i < 64; i++) {
                        vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
                                              sregs->u.s.ppc64.slb[i].slbe);
                }
        } else {
                for (i = 0; i < 16; i++) {
                        vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
                }
                for (i = 0; i < 8; i++) {
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
                                       (u32)sregs->u.s.ppc32.ibat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
                                       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
                                       (u32)sregs->u.s.ppc32.dbat[i]);
                        kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
                                       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
                }
        }

        /* Flush the MMU after messing with the segments */
        kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return 0;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                *val = get_reg_val(id, to_book3s(vcpu)->hior);
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                val->vsxval[0] = vcpu->arch.fpr[i];
                val->vsxval[1] = vcpu->arch.vsr[i];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
        int r = 0;

        switch (id) {
        case KVM_REG_PPC_HIOR:
                to_book3s(vcpu)->hior = set_reg_val(id, *val);
                to_book3s(vcpu)->hior_explicit = true;
                break;
#ifdef CONFIG_VSX
        case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
                long int i = id - KVM_REG_PPC_VSR0;

                if (!cpu_has_feature(CPU_FTR_VSX)) {
                        r = -ENXIO;
                        break;
                }
                vcpu->arch.fpr[i] = val->vsxval[0];
                vcpu->arch.vsr[i] = val->vsxval[1];
                break;
        }
#endif /* CONFIG_VSX */
        default:
                r = -EINVAL;
                break;
        }

        return r;
}

int kvmppc_core_check_processor_compat(void)
{
        return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s;
        struct kvm_vcpu *vcpu;
        int err = -ENOMEM;
        unsigned long p;

        vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
        if (!vcpu_book3s)
                goto out;

#ifdef CONFIG_KVM_BOOK3S_32
        vcpu_book3s->shadow_vcpu =
                kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
        if (!vcpu_book3s->shadow_vcpu)
                goto free_vcpu;
#endif
        vcpu = &vcpu_book3s->vcpu;
        err = kvm_vcpu_init(vcpu, kvm, id);
        if (err)
                goto free_shadow_vcpu;

        err = -ENOMEM;
        p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
        if (!p)
                goto uninit_vcpu;
        /* the real shared page fills the last 4k of our page */
        vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
        /*
         * Default to the same as the host if we're on a sufficiently
         * recent machine that we have 1TB segments;
         * otherwise default to PPC970FX.
         */
        vcpu->arch.pvr = 0x3C0301;
        if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
                vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
        /* default to book3s_32 (750) */
        vcpu->arch.pvr = 0x84202;
#endif
        kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
        vcpu->arch.slb_nr = 64;

        vcpu->arch.shadow_msr = MSR_USER64;

        err = kvmppc_mmu_init(vcpu);
        if (err < 0)
                goto uninit_vcpu;

        return vcpu;

uninit_vcpu:
        kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
        kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
#endif
        vfree(vcpu_book3s);
out:
        return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
        struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

        free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
        kvm_vcpu_uninit(vcpu);
        kfree(vcpu_book3s->shadow_vcpu);
        vfree(vcpu_book3s);
}

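/*
 * Run a vcpu: save the host process's FP/Altivec/VSX state on the stack,
 * let the guest use the real registers while it runs, then flush guest
 * state back into the vcpu (kvmppc_giveup_ext) and restore the host state.
 */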
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
        double fpr[32][TS_FPRWIDTH];
        unsigned int fpscr;
        int fpexc_mode;
#ifdef CONFIG_ALTIVEC
        vector128 vr[32];
        vector128 vscr;
        unsigned long uninitialized_var(vrsave);
        int used_vr;
#endif
#ifdef CONFIG_VSX
        int used_vsr;
#endif
        ulong ext_msr;

        /* Check if we can run the vcpu at all */
        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                ret = -EINVAL;
                goto out;
        }

        /*
         * Interrupts could be timers for the guest that we have to inject
         * again, so let's postpone them until we're in the guest; if we
         * really did time things that badly, we'll just exit again due to
         * a host external interrupt.
         */
        local_irq_disable();
        ret = kvmppc_prepare_to_enter(vcpu);
        if (ret <= 0) {
                local_irq_enable();
                goto out;
        }

        /* Save FPU state in stack */
        if (current->thread.regs->msr & MSR_FP)
                giveup_fpu(current);
        memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
        fpscr = current->thread.fpscr.val;
        fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Save Altivec state in stack */
        used_vr = current->thread.used_vr;
        if (used_vr) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
                vscr = current->thread.vscr;
                vrsave = current->thread.vrsave;
        }
#endif

#ifdef CONFIG_VSX
        /* Save VSX state in stack */
        used_vsr = current->thread.used_vsr;
        if (used_vsr && (current->thread.regs->msr & MSR_VSX))
                __giveup_vsx(current);
#endif

        /* Remember the MSR with disabled extensions */
        ext_msr = current->thread.regs->msr;

        /* Preload FPU if it's enabled */
        if (vcpu->arch.shared->msr & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /* No need for kvm_guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */

        /* Make sure we save the guest FPU/Altivec/VSX state */
        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

        current->thread.regs->msr = ext_msr;

        /* Restore FPU/VSX state from stack */
        memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
        current->thread.fpscr.val = fpscr;
        current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
        /* Restore Altivec state from stack */
        if (used_vr && current->thread.used_vr) {
                memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
                current->thread.vscr = vscr;
                current->thread.vrsave = vrsave;
        }
        current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
        current->thread.used_vsr = used_vsr;
#endif

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
                               struct kvm_dirty_log *log)
{
        struct kvm_memory_slot *memslot;
        struct kvm_vcpu *vcpu;
        ulong ga, ga_end;
        int is_dirty = 0;
        int r;
        unsigned long n;

        mutex_lock(&kvm->slots_lock);

        r = kvm_get_dirty_log(kvm, log, &is_dirty);
        if (r)
                goto out;

        /* If nothing is dirty, don't bother messing with page tables. */
        if (is_dirty) {
                memslot = id_to_memslot(kvm->memslots, log->slot);

                ga = memslot->base_gfn << PAGE_SHIFT;
                ga_end = ga + (memslot->npages << PAGE_SHIFT);

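                /*
                 * Flush every vcpu's shadow PTEs for this slot so the
                 * guest's subsequent accesses fault back into KVM and
                 * repopulate the dirty bitmap we are about to clear.
                 */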
                kvm_for_each_vcpu(n, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

                n = kvm_dirty_bitmap_bytes(memslot);
                memset(memslot->dirty_bitmap, 0, n);
        }

        r = 0;
out:
        mutex_unlock(&kvm->slots_lock);
        return r;
}

#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
        long int i;
        struct kvm_vcpu *vcpu;

        info->flags = 0;

        /* SLB is always 64 entries */
        info->slb_size = 64;

        /* Standard 4k base page size segment */
        info->sps[0].page_shift = 12;
        info->sps[0].slb_enc = 0;
        info->sps[0].enc[0].page_shift = 12;
        info->sps[0].enc[0].pte_enc = 0;

        /*
         * 64k large page size.
         * We only want to put this in if the CPUs we're emulating
         * support it, but unfortunately we don't have a vcpu easily
         * to hand here to test. Just pick the first vcpu, and if
         * that doesn't exist yet, report the minimum capability,
         * i.e., no 64k pages.
         * 1T segment support goes along with 64k pages.
         */
        i = 1;
        vcpu = kvm_get_vcpu(kvm, 0);
        if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
                info->flags = KVM_PPC_1T_SEGMENTS;
                info->sps[i].page_shift = 16;
                info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
                info->sps[i].enc[0].page_shift = 16;
                info->sps[i].enc[0].pte_enc = 1;
                ++i;
        }

        /* Standard 16M large page size segment */
        info->sps[i].page_shift = 24;
        info->sps[i].slb_enc = SLB_VSID_L;
        info->sps[i].enc[0].page_shift = 24;
        info->sps[i].enc[0].pte_enc = 0;

        return 0;
}
#endif /* CONFIG_PPC64 */

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
                              struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
                               unsigned long npages)
{
        return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot,
                                      struct kvm_userspace_memory_region *mem)
{
        return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem,
                                      const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
        INIT_LIST_HEAD(&kvm->arch.rtas_tokens);
#endif
        mutex_init(&kvm->arch.hpt_mutex);

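        /*
         * PR KVM intercepts exceptions through the real-mode vectors, so
         * "relocation on" exceptions must stay disabled host-wide while
         * any PR guest exists; kvm_global_user_count reference-counts
         * that requirement across VMs.
         */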
        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                if (++kvm_global_user_count == 1)
                        pSeries_disable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
        return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
        WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

        if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
                spin_lock(&kvm_global_user_count_lock);
                BUG_ON(kvm_global_user_count == 0);
                if (--kvm_global_user_count == 0)
                        pSeries_enable_reloc_on_exc();
                spin_unlock(&kvm_global_user_count_lock);
        }
}

static int kvmppc_book3s_init(void)
{
        int r;

        r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
                     THIS_MODULE);

        if (r)
                return r;

        r = kvmppc_mmu_hpte_sysinit();

        return r;
}

static void kvmppc_book3s_exit(void)
{
        kvmppc_mmu_hpte_sysexit();
        kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);