/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif
	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	vcpu->cpu = -1;
}

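/*
 * The shadow vcpu lives in memory that the real-mode (MMU off) entry and
 * exit code can reach, so the volatile guest state it touches is mirrored
 * into it by hand before entry and copied back afterwards.
 */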
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr  = vcpu->arch.lr;
	svcpu->pc  = vcpu->arch.pc;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr  = svcpu->lr;
	vcpu->arch.pc  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
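/*
 * React to host MM events (unmap, aging, PTE updates) by flushing the
 * shadow PTEs of every vcpu whose guest pages fall in the host virtual
 * address range [start, end) of any memslot.
 */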
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

	return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

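/*
 * The guest always runs in problem state, so the MSR actually loaded
 * into hardware (shadow_msr) has to be recomputed from the guest's view
 * of the MSR whenever that view changes.
 */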
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest has reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around that we need to flush. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * i.e., we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force-disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32-bit Book3S always has a 32-byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have 32-byte cache lines, which Linux assumes. To
 * make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
	}

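	/*
	 * Tag the virtual page with a pseudo-VSID so that real-mode and
	 * split IR/DR accesses get shadow translations distinct from the
	 * fully relocated case.
	 */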
	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.shared->msr |=
			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = vcpu->arch.fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			vcpu->arch.shadow_srr1 & 0x00000000f8000000ULL;
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}

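/*
 * With VSX, each slot in thread.fp_state.fpr[] is two doublewords wide
 * (the FP value plus the second half of the corresponding VSX register);
 * TS_FPRWIDTH hides that difference from the indexing below.
 */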
static inline int get_fpr_index(int i)
{
	return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = &t->fp_state.fpr[0][0];
	int i;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (current->thread.regs->msr & MSR_FP)
			giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fp_state.fpscr;

#ifdef CONFIG_VSX
		if (cpu_has_feature(CPU_FTR_VSX))
			for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
				vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr_state.vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vr_state.vscr;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

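		/*
		 * Reflect the failed load as an instruction storage
		 * interrupt: set SRR1 bit 33 (translation not found) and
		 * clear the remaining ISI status bits.
		 */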
		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{

	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = &t->fp_state.fpr[0][0];
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
		t->fp_state.fpscr = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		enable_kernel_fp();
		load_fp_state(&t->fp_state);
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr_state.vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vr_state.vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		enable_kernel_altivec();
		load_vr_state(&t->vr_state);
#endif
	}

	current->thread.regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		enable_kernel_fp();
		load_fp_state(&current->thread.fp_state);
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		enable_kernel_altivec();
		load_vr_state(&current->thread.vr_state);
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them.
		 * So treat the respective fault as a segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them.
		 * So treat the respective fault as a segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
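			/*
			 * The only 0x700 we expect from problem state is a
			 * trap on our patched dcbz; anything else is the
			 * guest's own program check, so reflect it back.
			 */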
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		case EMULATE_EXIT_USER:
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

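			/* Not handled in the kernel: pass the hypercall and
			 * its arguments (r4-r12) out to userspace. */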
			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = s;
		} else {
			kvmppc_fix_ee_before_entry();
		}
		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}
1048
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301049static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
1050 struct kvm_sregs *sregs)
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001051{
1052 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1053 int i;
1054
1055 sregs->pvr = vcpu->arch.pvr;
1056
1057 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
1058 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1059 for (i = 0; i < 64; i++) {
1060 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
1061 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1062 }
1063 } else {
1064 for (i = 0; i < 16; i++)
1065 sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];
1066
1067 for (i = 0; i < 8; i++) {
1068 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
1069 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
1070 }
1071 }
1072
1073 return 0;
1074}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
						    sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
		long int i = id - KVM_REG_PPC_VSR0;

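		/*
		 * Each 128-bit VSX register is two doublewords: doubleword 0
		 * is aliased with the corresponding FP register, doubleword 1
		 * lives in vcpu->arch.vsr[].
		 */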
		if (!cpu_has_feature(CPU_FTR_VSX)) {
			r = -ENXIO;
			break;
		}
		val->vsxval[0] = vcpu->arch.fpr[i];
		val->vsxval[1] = vcpu->arch.vsr[i];
		break;
	}
#endif /* CONFIG_VSX */
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
		long int i = id - KVM_REG_PPC_VSR0;

		if (!cpu_has_feature(CPU_FTR_VSX)) {
			r = -ENXIO;
			break;
		}
		vcpu->arch.fpr[i] = val->vsxval[0];
		vcpu->arch.vsr[i] = val->vsxval[1];
		break;
	}
#endif /* CONFIG_VSX */
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);

#ifdef CONFIG_PPC_BOOK3S_64
	/*
	 * Default to the same as the host if we're on a sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	struct thread_fp_state fp;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	struct thread_vr_state vr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	local_irq_disable();
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0) {
		local_irq_enable();
		goto out;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	fp = current->thread.fp_state;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		vr = current->thread.vr_state;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

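	/* Enter the guest; this returns only once the exit handler has
	 * decided we need to go back out to the host. */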
	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	current->thread.regs->msr = ext_msr;

	/* Restore FPU/VSX state from stack */
	current->thread.fp_state = fp;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		current->thread.vr_state = vr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}
1397
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

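/*
 * Report the MMU geometry the guest will see: the SLB size plus the
 * supported base and large page sizes with their SLB/PTE encodings.
 * Userspace (e.g. QEMU) retrieves this via the KVM_PPC_GET_SMMU_INFO
 * vm ioctl.
 */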
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to advertise this if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test with.  Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

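/*
 * PR KVM relies on exceptions being delivered through the classic
 * real-mode vectors. On pSeries firmware that supports H_SET_MODE we
 * therefore turn "relocation on" exception delivery off while any PR
 * guest exists on the machine; the global count below tracks how many
 * VMs currently need that.
 */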
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pSeries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pSeries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/* We are always compatible */
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

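/*
 * Dispatch table that plugs the PR implementations into the generic
 * Book3S code. kvmppc_book3s_init_pr() below publishes it through the
 * kvmppc_pr_ops pointer, which is what lets the PR and HV backends
 * coexist in one kernel.
 */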
static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load = kvmppc_core_vcpu_load_pr,
	.vcpu_put = kvmppc_core_vcpu_put_pr,
	.set_msr = kvmppc_set_msr_pr,
	.vcpu_run = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva = kvm_unmap_hva_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
};

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support building this as a separate module for 64-bit Book3S.
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

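/*
 * The miscdev alias (char-major-10-232, i.e. KVM_MINOR) lets the
 * module be loaded automatically when /dev/kvm is first opened; the
 * devname alias additionally lists it in modules.devname so tooling
 * can create the /dev/kvm node up front.
 */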
MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif