/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <asm/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/hvcall.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

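/*
 * Scheduled-in hook: stage the shadow SLB into the per-CPU shadow
 * vcpu (64-bit) or hook up the shadow vcpu in thread_struct (32-bit),
 * and remember which host CPU we are running on.
 */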
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif
	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif
}

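/*
 * Scheduled-out hook: pull state touched by real-mode code back from
 * the shadow vcpu, then hand the FPU/Altivec/VSX units and the TAR
 * facility back to the host.
 */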
static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu, svcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr  = vcpu->arch.lr;
	svcpu->pc  = vcpu->arch.pc;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	/*
	 * vcpu_put would just call us again because in_use hasn't
	 * been updated yet.
	 */
	preempt_disable();

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr  = svcpu->lr;
	vcpu->arch.pc  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
	svcpu->in_use = false;

out:
	preempt_enable();
}

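/*
 * Handle pending vcpu requests before entering the guest; returns 1
 * to indicate the guest can be entered.
 */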
static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/*
	 * We misuse TLB_FLUSH to indicate that we want to clear
	 * all shadow cache entries.
	 */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
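/*
 * Flush the shadow PTEs of every guest page backed by the host
 * virtual address range [start, end) in any memslot, on all vcpus.
 */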
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
				   (memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

	return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

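/*
 * Recompute the MSR the guest actually runs with in problem state:
 * guest-visible bits we can honour directly, bits the host must
 * force (e.g. MSR_PR, MSR_EE), and the external providers the guest
 * currently owns.
 */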
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest has reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around that we need to flush. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * i.e., we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/*
	 * If we are at hypervisor level on a 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store.
	 */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/*
	 * Cell performs badly if MSR_FEx are set. So let's hope nobody
	 * really needs them in a VM on Cell and force-disable them.
	 */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ("mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/*
 * Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap
 * dcbz to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't
 * use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

static int kvmppc_visible_gfn(struct kvm_vcpu *vcpu, gfn_t gfn)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	if (unlikely(mp_pa) &&
	    unlikely((mp_pa & KVM_PAM) >> PAGE_SHIFT == gfn)) {
		return 1;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gfn);
}

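/*
 * Resolve a guest instruction or data storage interrupt: translate the
 * effective address through the guest MMU, then either reflect the
 * fault into the guest, map the page on the host, or emulate MMIO.
 */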
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte;
	bool is_mmio = false;
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		u32 dsisr = vcpu->arch.fault_dsisr;
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
		kvmppc_set_dsisr(vcpu, dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (!is_mmio &&
		   kvmppc_visible_gfn(vcpu, pte.raddr >> PAGE_SHIFT)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		kvmppc_mmu_map_page(vcpu, &pte, iswrite);
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if (r == RESUME_HOST_NV)
			r = RESUME_HOST;
	}

	return r;
}

static inline int get_fpr_index(int i)
{
	return i * TS_FPRWIDTH;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

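/*
 * Note that the FSCR_*_LG values used below are bit numbers rather
 * than masks: "1ULL << fac" selects the facility bit in (shadow_)fscr,
 * and "fac << 56" forms the Interrupt Cause field of the facility
 * unavailable interrupt (see kvmppc_trigger_fac_interrupt below).
 */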
/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

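/*
 * Re-fetch the last guest instruction; if it can't be read, an
 * instruction storage interrupt is synthesized for the guest.
 */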
static int kvmppc_read_inst(struct kvm_vcpu *vcpu)
{
	ulong srr0 = kvmppc_get_pc(vcpu);
	u32 last_inst = kvmppc_get_last_inst(vcpu);
	int ret;

	ret = kvmppc_ld(vcpu, &srr0, sizeof(u32), &last_inst, false);
	if (ret == -ENOENT) {
		ulong msr = kvmppc_get_msr(vcpu);

		msr = kvmppc_set_field(msr, 33, 33, 1);
		msr = kvmppc_set_field(msr, 34, 36, 0);
		msr = kvmppc_set_field(msr, 42, 47, 0);
		kvmppc_set_msr_fast(vcpu, msr);
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_INST_STORAGE);
		return EMULATE_AGAIN;
	}

	return EMULATE_DONE;
}

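/*
 * Decide whether an "unavailable" interrupt may simply enable the
 * facility (EMULATE_DONE) or whether the instruction needs software
 * emulation (EMULATE_FAIL), as with paired singles.
 */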
static int kvmppc_check_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr)
{
	/* Need to do paired single emulation? */
	if (!(vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE))
		return EMULATE_DONE;

	/* Read out the instruction */
	if (kvmppc_read_inst(vcpu) == EMULATE_DONE)
		/* Need to emulate */
		return EMULATE_FAIL;

	return EMULATE_AGAIN;
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

#ifdef CONFIG_PPC_BOOK3S_64

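/* Facility (FSCR) handling: TAR / EBB / DSCR */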
static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

	return RESUME_GUEST;
}
#endif

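/*
 * Top-level exit handler: dispatch on the interrupt vector that took
 * us out of the guest and return a RESUME_* code telling the caller
 * whether to re-enter the guest or go back to userspace.
 */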
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

#ifdef CONFIG_PPC_BOOK3S_32
		/*
		 * We set segments as unused segments when invalidating them,
		 * so treat the respective fault as a segment fault.
		 */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack, we use the NX bit to flush
			 * and patch the page, so we can't use the NX bit inside
			 * the guest. Let's cross our fingers that no guest that
			 * needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			u64 msr = kvmppc_get_msr(vcpu);
			msr |= shadow_srr1 & 0x58000000;
			kvmppc_set_msr_fast(vcpu, msr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/*
		 * We set segments as unused segments when invalidating them,
		 * so treat the respective fault as a segment fault.
		 */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_set_dar(vcpu, dar);
			kvmppc_set_dsisr(vcpu, fault_dsisr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
	{
		enum emulation_result er;
		ulong flags;

program_interrupt:
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;

		if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
			printk(KERN_INFO "Userspace triggered 0x700 exception at 0x%lx (0x%x)\n", kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
#endif
			if ((kvmppc_get_last_inst(vcpu) & 0xff0007ff) !=
			    (INS_DCBZ & 0xfffffff7)) {
				kvmppc_core_queue_program(vcpu, flags);
				r = RESUME_GUEST;
				break;
			}
		}

		vcpu->stat.emulated_inst_exits++;
		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_AGAIN:
			r = RESUME_GUEST;
			break;
		case EMULATE_FAIL:
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, kvmppc_get_pc(vcpu), kvmppc_get_last_inst(vcpu));
			kvmppc_core_queue_program(vcpu, flags);
			r = RESUME_GUEST;
			break;
		case EMULATE_DO_MMIO:
			run->exit_reason = KVM_EXIT_MMIO;
			r = RESUME_HOST_NV;
			break;
		case EMULATE_EXIT_USER:
			r = RESUME_HOST_NV;
			break;
		default:
			BUG();
		}
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
		if (vcpu->arch.papr_enabled &&
		    (kvmppc_get_last_sc(vcpu) == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
			   (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
			   (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
			   (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;

		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL: ext_msr = MSR_FP;  break;
		case BOOK3S_INTERRUPT_ALTIVEC:    ext_msr = MSR_VEC; break;
		case BOOK3S_INTERRUPT_VSX:        ext_msr = MSR_VSX; break;
		}

		switch (kvmppc_check_ext(vcpu, exit_nr)) {
		case EMULATE_DONE:
			/* everything ok - let's enable the ext */
			r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
			break;
		case EMULATE_FAIL:
			/* we need to emulate this instruction */
			goto program_interrupt;
			break;
		default:
			/* nothing to worry about - go again */
			break;
		}
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
		if (kvmppc_read_inst(vcpu) == EMULATE_DONE) {
			u32 last_inst = kvmppc_get_last_inst(vcpu);
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		r = RESUME_GUEST;
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
	case BOOK3S_INTERRUPT_TRACE:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/*
		 * To avoid clobbering exit_reason, only check for signals
		 * if we aren't already exiting to userspace for some other
		 * reason.
		 */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

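/* Export the guest segment state (SLB, or SRs and BATs) to userspace */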
static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
					      sregs->u.s.ppc64.slb[i].slbe);
		}
	} else {
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_LPCR:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_LPCR:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

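/*
 * Allocate and set up a PR-mode vcpu: the book3s struct, the 32-bit
 * shadow vcpu where needed, and the page shared with the guest.
 */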
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301297static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
1298 unsigned int id)
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001299{
1300 struct kvmppc_vcpu_book3s *vcpu_book3s;
1301 struct kvm_vcpu *vcpu;
1302 int err = -ENOMEM;
1303 unsigned long p;
1304
Paul Mackerras3ff95502013-09-20 14:52:49 +10001305 vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
1306 if (!vcpu)
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001307 goto out;
1308
Paul Mackerras3ff95502013-09-20 14:52:49 +10001309 vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
1310 if (!vcpu_book3s)
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001311 goto free_vcpu;
Paul Mackerras3ff95502013-09-20 14:52:49 +10001312 vcpu->arch.book3s = vcpu_book3s;
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001313
Alexander Grafab784752014-04-06 23:31:48 +02001314#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
Paul Mackerras3ff95502013-09-20 14:52:49 +10001315 vcpu->arch.shadow_vcpu =
1316 kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
1317 if (!vcpu->arch.shadow_vcpu)
1318 goto free_vcpu3s;
Paul Mackerrasa2d56022013-09-20 14:52:43 +10001319#endif
Paul Mackerras3ff95502013-09-20 14:52:49 +10001320
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001321 err = kvm_vcpu_init(vcpu, kvm, id);
1322 if (err)
1323 goto free_shadow_vcpu;
1324
Thadeu Lima de Souza Cascardo7c7b4062013-07-17 12:10:29 -03001325 err = -ENOMEM;
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001326 p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001327 if (!p)
1328 goto uninit_vcpu;
Thadeu Lima de Souza Cascardo7c7b4062013-07-17 12:10:29 -03001329 /* the real shared page fills the last 4k of our page */
1330 vcpu->arch.shared = (void *)(p + PAGE_SIZE - 4096);
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001331#ifdef CONFIG_PPC_BOOK3S_64
Alexander Graf5deb8e72014-04-24 13:46:24 +02001332 /* Always start the shared struct in native endian mode */
1333#ifdef __BIG_ENDIAN__
1334 vcpu->arch.shared_big_endian = true;
1335#else
1336 vcpu->arch.shared_big_endian = false;
1337#endif
1338
Paul Mackerrasa4a0f252013-09-20 14:52:44 +10001339 /*
1340 * Default to the same as the host if we're on sufficiently
1341 * recent machine that we have 1TB segments;
1342 * otherwise default to PPC970FX.
1343 */
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001344 vcpu->arch.pvr = 0x3C0301;
Paul Mackerrasa4a0f252013-09-20 14:52:44 +10001345 if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
1346 vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

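/*
 * Tear down a vcpu in the reverse order of kvmppc_core_vcpu_create_pr():
 * shared page, generic vcpu state, shadow vcpu, book3s state, and
 * finally the vcpu struct itself.
 */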
static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

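/*
 * Run a vcpu until it exits to userspace.  Host FP/Altivec/VSX state is
 * saved to the thread_struct before entry, and any facility state the
 * guest was using is flushed back out on exit.
 */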
static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	/*
	 * Pending interrupts could be timers for the guest that we have
	 * to inject again, so postpone them until we're in the guest;
	 * if we really did time things that badly, we just exit again
	 * due to a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU state in thread_struct */
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);

#ifdef CONFIG_ALTIVEC
	/* Save Altivec state in thread_struct */
	if (current->thread.regs->msr & MSR_VEC)
		giveup_altivec(current);
#endif

#ifdef CONFIG_VSX
	/* Save VSX state in thread_struct */
	if (current->thread.regs->msr & MSR_VSX)
		__giveup_vsx(current);
#endif

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/*
	 * No need for kvm_guest_exit.  It's done in handle_exit.
	 * We also get here with interrupts enabled.
	 */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

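	/*
	 * Once the log has been harvested, flush each vcpu's shadow PTEs
	 * for the slot so that subsequent guest writes fault in again and
	 * can be tracked, then clear the dirty bitmap.
	 */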
	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		memslot = id_to_memslot(kvm->memslots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

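/*
 * PR KVM runs guests through the shadow MMU and carries no per-memslot
 * arch state, so the memslot hooks below are no-ops.
 */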
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
					struct kvm_memory_slot *memslot,
					struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

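/*
 * Report the MMU geometry (SLB size and the supported segment/actual
 * page-size encodings) to userspace.  64k pages and 1T segments are
 * only advertised if the first vcpu's emulated CPU supports them.
 */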
#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to advertise this if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test with.  Just pick the first vcpu, and
	 * if that doesn't exist yet, report the minimum capability,
	 * i.e. no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

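/*
 * PR KVM cannot cope with relocation-on exceptions while a guest is
 * running, so on firmware that can switch the mode, they are disabled
 * system-wide while any PR VM exists.  The global refcount above flips
 * the mode on first use and restores it when the last PR VM goes away.
 */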
static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pSeries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

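/*
 * Destroy a PR VM: all TCE tables should already be gone, and the
 * reloc-on-exception refcount taken in kvmppc_core_init_vm_pr() is
 * dropped, re-enabling the mode once the last PR VM disappears.
 */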
static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pSeries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/* we are always compatible */
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

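/*
 * PR-specific callbacks, hooked into the generic book3s code through
 * kvmppc_pr_ops below.
 */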
static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load = kvmppc_core_vcpu_load_pr,
	.vcpu_put = kvmppc_core_vcpu_put_pr,
	.set_msr = kvmppc_set_msr_pr,
	.vcpu_run = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva = kvm_unmap_hva_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
};

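/*
 * Module init: verify the processor is supported, register the PR ops
 * with the generic code, and set up the shadow-HPTE caches.
 */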
int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

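/*
 * Module exit: unregister the PR ops and tear down the shadow-HPTE
 * caches.
 */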
void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif