/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>

#include "trace.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

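/*
 * On 64-bit hosts the shadow vcpu -- the register state the low-level
 * entry/exit code actually works on -- lives partly in the PACA, so it
 * is copied in from the vcpu struct here and copied back out in
 * kvmppc_core_vcpu_put().  32-bit hosts just keep a pointer to it in
 * the thread struct.
 */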
void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	memcpy(&get_paca()->shadow_vcpu, to_book3s(vcpu)->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu_put(svcpu);
#endif
	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = to_book3s(vcpu)->shadow_vcpu;
#endif
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	memcpy(to_book3s(vcpu)->shadow_vcpu, &get_paca()->shadow_vcpu,
	       sizeof(get_paca()->shadow_vcpu));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	vcpu->cpu = -1;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/*
	 * We misuse TLB_FLUSH to indicate that we want to clear
	 * all shadow cache entries.
	 */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/

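/*
 * Host MM notifier callbacks.  PR KVM keeps no reverse map from host
 * pages to shadow PTEs, so an unmap notification simply flushes the
 * whole shadow TLB, and page aging is not tracked at all.
 */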
int kvm_unmap_hva(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	/*
	 * Flush all shadow tlb entries everywhere. This is slow, but
	 * we are 100% sure that we catch the page that is to be unmapped.
	 */
	kvm_flush_remote_tlbs(kvm);

	return 0;
}

int kvm_unmap_hva_range(struct kvm *kvm, unsigned long start, unsigned long end)
{
	/* kvm_unmap_hva flushes everything anyway */
	kvm_unmap_hva(kvm, start);

	return 0;
}

int kvm_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

int kvm_test_age_hva(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

void kvm_set_spte_hva(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	kvm_unmap_hva(kvm, hva);
}

/*****************************************/

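/*
 * Recompute the MSR value that is actually loaded into the hardware
 * while the guest runs.  The guest-visible MSR lives in the shared
 * page; the shadow MSR keeps the guest's trace/FP-exception bits,
 * forces on the bits the host needs (translation, problem state, EE),
 * and passes through whichever external providers (FP/VMX/VSX) the
 * guest currently owns.
 */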
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong smsr = vcpu->arch.shared->msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers that the guest has reserved */
	smsr |= (vcpu->arch.shared->msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

void kvmppc_set_msr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = vcpu->arch.shared->msr;

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	vcpu->arch.shared->msr = msr;
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			vcpu->arch.shared->msr = msr;
		}
	}

	if ((vcpu->arch.shared->msr & (MSR_PR|MSR_IR|MSR_DR)) !=
	    (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, and we need to flush it. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * i.e. we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

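/*
 * Set the PVR the guest sees.  The PVR decides whether the 32-bit or
 * the 64-bit (SLB-based) MMU emulation is used, where HIOR defaults
 * to, and which CPU quirks (32-byte dcbz, native paired singles)
 * apply.
 */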
void kvmppc_set_pvr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/*
	 * If we are in hypervisor level on a 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store.
	 */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/*
	 * Cell performs badly if MSR_FEx are set. So let's hope nobody
	 * really needs them in a VM on Cell and force-disable them.
	 */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32-bit Book3S always has a 32-byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/*
 * Book3s_32 CPUs always have a 32-byte cache line size, which Linux
 * assumes. To make Book3s_32 Linux work on Book3s_64, we have to make
 * sure we trap dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/*
	 * Patch every dcbz into a reserved instruction, so we trap.
	 * The 0xff0007ff mask compares the opcode fields while ignoring
	 * the register operands; clearing bit 0x8 turns the extended
	 * opcode into an illegal encoding.
	 */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((page[i] & 0xff0007ff) == INS_DCBZ)
			page[i] &= 0xfffffff7;

	kunmap_atomic(page);
	put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(vcpu->arch.shared->msr & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

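/*
 * Handle a guest page fault: translate the effective address through
 * the emulated guest MMU (or treat memory as 1:1 mapped when
 * translation is off), tag the virtual page with a VSID namespace so
 * real-mode and translated mappings never collide in the shadow cache,
 * and then either map the page on the host, reflect the fault back to
 * the guest, or emulate MMIO.
 */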
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (vcpu->arch.shared->msr & MSR_DR) ? true : false;
	bool ir = (vcpu->arch.shared->msr & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
	}

	switch (vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((vcpu->arch.shared->msr & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr;
		vcpu->arch.shared->msr |=
			(svcpu->shadow_srr1 & 0x00000000f8000000ULL);
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		vcpu->arch.shared->dsisr = svcpu->fault_dsisr & ~DSISR_NOHPTE;
		vcpu->arch.shared->dsisr |= DSISR_PROTFAULT;
		vcpu->arch.shared->msr |=
			svcpu->shadow_srr1 & 0x00000000f8000000ULL;
		svcpu_put(svcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			(!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

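/*
 * With VSX each FP register occupies the high half of a 128-bit slot
 * in thread.fpr[], so TS_FPRWIDTH (2 with VSX, 1 without) converts an
 * FP register number into an index into that array, e.g.:
 *   thread_fpr[get_fpr_index(i)]     = FP reg i (high VSX half)
 *   thread_fpr[get_fpr_index(i) + 1] = low VSX half of VSR i
 */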
static inline int get_fpr_index(int i)
{
	return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fpr[].
		 */
		giveup_fpu(current);
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			vcpu_fpr[i] = thread_fpr[get_fpr_index(i)];

		vcpu->arch.fpscr = t->fpscr.val;

#ifdef CONFIG_VSX
		if (cpu_has_feature(CPU_FTR_VSX))
			for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
				vcpu_vsx[i] = thread_fpr[get_fpr_index(i) + 1];
#endif
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		giveup_altivec(current);
		memcpy(vcpu->arch.vr, t->vr, sizeof(vcpu->arch.vr));
		vcpu->arch.vscr = t->vscr;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

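/*
 * Fetch the last-executed instruction from guest memory.  If the load
 * fails because the page is no longer mapped, synthesize an
 * instruction storage interrupt for the guest and tell the caller to
 * retry instead of emulating.
 */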
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = vcpu->arch.shared->msr;

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		vcpu->arch.shared->msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

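/*
 * Lazy switching of the external providers: guest FP/VMX/VSX state
 * stays in the vcpu struct until the guest actually takes the
 * corresponding "unavailable" interrupt.  Only then is the state
 * loaded into the real registers and the facility marked guest-owned.
 */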
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;
	u64 *vcpu_fpr = vcpu->arch.fpr;
#ifdef CONFIG_VSX
	u64 *vcpu_vsx = vcpu->arch.vsr;
#endif
	u64 *thread_fpr = (u64 *)t->fpr;
	int i;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(vcpu->arch.shared->msr & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	current->thread.regs->msr |= msr;

	if (msr & MSR_FP) {
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++)
			thread_fpr[get_fpr_index(i)] = vcpu_fpr[i];
#ifdef CONFIG_VSX
		for (i = 0; i < ARRAY_SIZE(vcpu->arch.vsr) / 2; i++)
			thread_fpr[get_fpr_index(i) + 1] = vcpu_vsx[i];
#endif
		t->fpscr.val = vcpu->arch.fpscr;
		t->fpexc_mode = 0;
		kvmppc_load_up_fpu();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		memcpy(t->vr, vcpu->arch.vr, sizeof(vcpu->arch.vr));
		t->vscr = vcpu->arch.vscr;
		t->vrsave = -1;
		kvmppc_load_up_altivec();
#endif
	}

	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

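/*
 * Main exit dispatcher, called with interrupts enabled once the guest
 * has trapped back into the host.  Returns a RESUME_* code that tells
 * the caller whether to reenter the guest or bail out to userspace.
 */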
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/*
		 * We mark segments as unused when invalidating them, so
		 * treat the corresponding fault as a segment fault.
		 */
		if (svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT] == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack, we use the NX bit to flush
			 * and patch the page, so we can't use the NX bit inside
			 * the guest. Let's cross our fingers that no guest that
			 * needs the dcbz hack also needs NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			vcpu->arch.shared->msr |= shadow_srr1 & 0x58000000;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		u32 fault_dsisr = svcpu->fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/*
		 * We mark segments as unused when invalidating them, so
		 * treat the corresponding fault as a segment fault.
		 */
		if ((svcpu->sr[dar >> SID_SHIFT]) == SR_INVALID) {
			kvmppc_mmu_map_segment(vcpu, dar);
			r = RESUME_GUEST;
			svcpu_put(svcpu);
			break;
		}
#endif
		svcpu_put(svcpu);

		/* The only case we need to handle is missing shadow PTEs */
		if (fault_dsisr & DSISR_NOHPTE) {
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
		} else {
			vcpu->arch.shared->dar = dar;
			vcpu->arch.shared->dsisr = fault_dsisr;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			vcpu->arch.shared->dar = kvmppc_get_fault_dar(vcpu);
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
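	/*
	 * Program interrupts taken in guest user mode are normally
	 * reflected straight back to the guest; the one exception is a
	 * trap on an instruction we patched from dcbz, which has to be
	 * emulated here.  Everything else goes through the emulator.
	 */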
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		struct kvmppc_book3s_shadow_vcpu *svcpu;
		ulong flags;

program_interrupt:
		svcpu = svcpu_get(vcpu);
		flags = svcpu->shadow_srr1 & 0x1f0000ull;
		svcpu_put(svcpu);

		if (vcpu->arch.shared->msr & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_inst(vcpu) == 0x44000022) &&
		    !(vcpu->arch.shared->msr & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_KVM_BOOK3S_64_PR
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
			   (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
			   (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(vcpu->arch.shared->msr & MSR_PR) &&
			   (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			vcpu->arch.shared->dsisr = kvmppc_alignment_dsisr(vcpu,
				kvmppc_get_last_inst(vcpu));
			vcpu->arch.shared->dar = kvmppc_alignment_dar(vcpu,
				kvmppc_get_last_inst(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
		ulong shadow_srr1 = svcpu->shadow_srr1;
		svcpu_put(svcpu);
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/*
		 * To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason.
		 */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = s;
		} else {
			kvmppc_lazy_ee_enable();
		}
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

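/*
 * sregs carries the emulated MMU setup: SDR1 plus either the 64-bit
 * SLB (as slbe/slbv pairs) or the 32-bit segment registers and BATs,
 * depending on which MMU flavour this vcpu emulates.
 */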
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = vcpu->arch.shared->sr[i];

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

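/*
 * ONE_REG accessors.  Only HIOR and, when the host has VSX, the
 * combined FP/VSX halves of the VSX registers are exposed this way.
 */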
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
		long int i = id - KVM_REG_PPC_VSR0;

		if (!cpu_has_feature(CPU_FTR_VSX)) {
			r = -ENXIO;
			break;
		}
		val->vsxval[0] = vcpu->arch.fpr[i];
		val->vsxval[1] = vcpu->arch.vsr[i];
		break;
	}
#endif /* CONFIG_VSX */
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
#ifdef CONFIG_VSX
	case KVM_REG_PPC_VSR0 ... KVM_REG_PPC_VSR31: {
		long int i = id - KVM_REG_PPC_VSR0;

		if (!cpu_has_feature(CPU_FTR_VSX)) {
			r = -ENXIO;
			break;
		}
		vcpu->arch.fpr[i] = val->vsxval[0];
		vcpu->arch.vsr[i] = val->vsxval[1];
		break;
	}
#endif /* CONFIG_VSX */
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

int kvmppc_core_check_processor_compat(void)
{
	return 0;
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto out;

	vcpu_book3s->shadow_vcpu = (struct kvmppc_book3s_shadow_vcpu *)
		kzalloc(sizeof(*vcpu_book3s->shadow_vcpu), GFP_KERNEL);
	if (!vcpu_book3s->shadow_vcpu)
		goto free_vcpu;

	vcpu = &vcpu_book3s->vcpu;
	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	/* the real shared page fills the last 4k of our page */
	vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
	if (!p)
		goto uninit_vcpu;

#ifdef CONFIG_PPC_BOOK3S_64
	/* default to book3s_64 (970fx) */
	vcpu->arch.pvr = 0x3C0301;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
	kfree(vcpu_book3s->shadow_vcpu);
free_vcpu:
	vfree(vcpu_book3s);
out:
	return ERR_PTR(err);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
	kfree(vcpu_book3s->shadow_vcpu);
	vfree(vcpu_book3s);
}

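/*
 * Guest entry glue: stash the host FP/Altivec/VSX state on the stack,
 * preload whatever facilities the guest had enabled, run the guest,
 * then hand the guest state back to the vcpu struct and restore the
 * host's.
 */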
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
	double fpr[32][TS_FPRWIDTH];
	unsigned int fpscr;
	int fpexc_mode;
#ifdef CONFIG_ALTIVEC
	vector128 vr[32];
	vector128 vscr;
	unsigned long uninitialized_var(vrsave);
	int used_vr;
#endif
#ifdef CONFIG_VSX
	int used_vsr;
#endif
	ulong ext_msr;

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	local_irq_disable();
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0) {
		local_irq_enable();
		goto out;
	}

	/* Save FPU state in stack */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in stack */
	used_vr = current->thread.used_vr;
	if (used_vr) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		memcpy(vr, current->thread.vr, sizeof(current->thread.vr));
		vscr = current->thread.vscr;
		vrsave = current->thread.vrsave;
	}
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in stack */
	used_vsr = current->thread.used_vsr;
	if (used_vsr && (current->thread.regs->msr & MSR_VSX))
		__giveup_vsx(current);
#endif

	/* Remember the MSR with disabled extensions */
	ext_msr = current->thread.regs->msr;

	/* Preload FPU if it's enabled */
	if (vcpu->arch.shared->msr & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_lazy_ee_enable();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/*
	 * No need for kvm_guest_exit; it's done in handle_exit.
	 * We also get here with interrupts enabled.
	 */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	current->thread.regs->msr = ext_msr;

	/* Restore FPU/VSX state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;

#ifdef CONFIG_ALTIVEC
	/* Restore Altivec state from stack */
	if (used_vr && current->thread.used_vr) {
		memcpy(current->thread.vr, vr, sizeof(current->thread.vr));
		current->thread.vscr = vscr;
		current->thread.vrsave = vrsave;
	}
	current->thread.used_vr = used_vr;
#endif

#ifdef CONFIG_VSX
	current->thread.used_vsr = used_vsr;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm,
			       struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

#ifdef CONFIG_PPC64
int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm, struct kvm_ppc_smmu_info *info)
{
	/* No flags */
	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/* Standard 16M large page size segment */
	info->sps[1].page_shift = 24;
	info->sps[1].slb_enc = SLB_VSID_L;
	info->sps[1].enc[0].page_shift = 24;
	info->sps[1].enc[0].pte_enc = 0;

	return 0;
}
#endif /* CONFIG_PPC64 */

void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      struct kvm_memory_slot old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

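/*
 * While any guest exists, exceptions have to keep arriving at the
 * real-mode vectors that the KVM entry code hooks, so "relocation on
 * exception" is disabled host-wide by the first VM created and only
 * re-enabled when the last one goes away; the refcount below tracks
 * that.
 */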
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

int kvmppc_core_init_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	INIT_LIST_HEAD(&kvm->arch.spapr_tce_tables);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pSeries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pSeries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_book3s_init(void)
{
	int r;

	r = kvm_init(NULL, sizeof(struct kvmppc_vcpu_book3s), 0,
		     THIS_MODULE);

	if (r)
		return r;

	r = kvmppc_mmu_hpte_sysinit();

	return r;
}

static void kvmppc_book3s_exit(void)
{
	kvmppc_mmu_hpte_sysexit();
	kvm_exit();
}

module_init(kvmppc_book3s_init);
module_exit(kvmppc_book3s_exit);