/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr);
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac);

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#endif

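/*
 * "Split real mode" is the state where the guest runs with MSR_DR set
 * but MSR_IR clear: data accesses are translated while instruction
 * fetches are in real mode.  Since we always run the guest with both
 * IR and DR on, we approximate this by running the code from an
 * otherwise unused address range (pc | SPLIT_HACK_OFFS) and stripping
 * the offset back off when translating addresses; see
 * kvmppc_handle_pagefault() below.
 */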
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* We are in DR only split real mode */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* We have not fixed up the guest already */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* The code is in fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);

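/*
 * Make this vcpu current on the host CPU: stage the shadow SLB into the
 * per-CPU shadow vcpu and, on hosts running in hypervisor mode, clear
 * LPCR[AIL] so that interrupts taken while the guest runs are delivered
 * at the traditional real-mode vector offsets that PR KVM's interrupt
 * entry code expects, rather than at the relocated AIL locations.
 */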
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
	svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
	svcpu->in_use = 0;
	svcpu_put(svcpu);
#endif

	/* Disable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

	vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
	current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
	struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
	if (svcpu->in_use) {
		kvmppc_copy_from_svcpu(vcpu, svcpu);
	}
	memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
	to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
	svcpu_put(svcpu);
#endif

	if (kvmppc_is_split_real(vcpu))
		kvmppc_unfixup_split_real(vcpu);

	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

	/* Enable AIL if supported */
	if (cpu_has_feature(CPU_FTR_HVMODE) &&
	    cpu_has_feature(CPU_FTR_ARCH_207S))
		mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

	vcpu->cpu = -1;
}

/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvmppc_book3s_shadow_vcpu *svcpu,
			  struct kvm_vcpu *vcpu)
{
	svcpu->gpr[0] = vcpu->arch.gpr[0];
	svcpu->gpr[1] = vcpu->arch.gpr[1];
	svcpu->gpr[2] = vcpu->arch.gpr[2];
	svcpu->gpr[3] = vcpu->arch.gpr[3];
	svcpu->gpr[4] = vcpu->arch.gpr[4];
	svcpu->gpr[5] = vcpu->arch.gpr[5];
	svcpu->gpr[6] = vcpu->arch.gpr[6];
	svcpu->gpr[7] = vcpu->arch.gpr[7];
	svcpu->gpr[8] = vcpu->arch.gpr[8];
	svcpu->gpr[9] = vcpu->arch.gpr[9];
	svcpu->gpr[10] = vcpu->arch.gpr[10];
	svcpu->gpr[11] = vcpu->arch.gpr[11];
	svcpu->gpr[12] = vcpu->arch.gpr[12];
	svcpu->gpr[13] = vcpu->arch.gpr[13];
	svcpu->cr  = vcpu->arch.cr;
	svcpu->xer = vcpu->arch.xer;
	svcpu->ctr = vcpu->arch.ctr;
	svcpu->lr  = vcpu->arch.lr;
	svcpu->pc  = vcpu->arch.pc;
#ifdef CONFIG_PPC_BOOK3S_64
	svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
	/*
	 * Now also save the current time base value. We use this
	 * to find the guest purr and spurr value.
	 */
	vcpu->arch.entry_tb = get_tb();
	vcpu->arch.entry_vtb = get_vtb();
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.entry_ic = mfspr(SPRN_IC);
	svcpu->in_use = true;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu,
			    struct kvmppc_book3s_shadow_vcpu *svcpu)
{
	/*
	 * vcpu_put would just call us again because in_use hasn't
	 * been updated yet.
	 */
	preempt_disable();

	/*
	 * Maybe we were already preempted and synced the svcpu from
	 * our preempt notifiers. Don't bother touching this svcpu then.
	 */
	if (!svcpu->in_use)
		goto out;

	vcpu->arch.gpr[0] = svcpu->gpr[0];
	vcpu->arch.gpr[1] = svcpu->gpr[1];
	vcpu->arch.gpr[2] = svcpu->gpr[2];
	vcpu->arch.gpr[3] = svcpu->gpr[3];
	vcpu->arch.gpr[4] = svcpu->gpr[4];
	vcpu->arch.gpr[5] = svcpu->gpr[5];
	vcpu->arch.gpr[6] = svcpu->gpr[6];
	vcpu->arch.gpr[7] = svcpu->gpr[7];
	vcpu->arch.gpr[8] = svcpu->gpr[8];
	vcpu->arch.gpr[9] = svcpu->gpr[9];
	vcpu->arch.gpr[10] = svcpu->gpr[10];
	vcpu->arch.gpr[11] = svcpu->gpr[11];
	vcpu->arch.gpr[12] = svcpu->gpr[12];
	vcpu->arch.gpr[13] = svcpu->gpr[13];
	vcpu->arch.cr  = svcpu->cr;
	vcpu->arch.xer = svcpu->xer;
	vcpu->arch.ctr = svcpu->ctr;
	vcpu->arch.lr  = svcpu->lr;
	vcpu->arch.pc  = svcpu->pc;
	vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
	vcpu->arch.fault_dar   = svcpu->fault_dar;
	vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
	vcpu->arch.last_inst   = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
	vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
	/*
	 * Update purr and spurr using time base on exit.
	 */
	vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
	vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
	to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
	if (cpu_has_feature(CPU_FTR_ARCH_207S))
		vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;
	svcpu->in_use = false;

out:
	preempt_enable();
}

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	/* We misuse TLB_FLUSH to indicate that we want to clear
	   all shadow cache entries */
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return r;
}

/************* MMU Notifiers *************/
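/*
 * Flush the shadow mappings for every guest page backed by a host
 * virtual address in [start, end); this is the common helper behind
 * the unmap and set_spte notifiers below.
 */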
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
			     unsigned long end)
{
	long i;
	struct kvm_vcpu *vcpu;
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;

	slots = kvm_memslots(kvm);
	kvm_for_each_memslot(memslot, slots) {
		unsigned long hva_start, hva_end;
		gfn_t gfn, gfn_end;

		hva_start = max(start, memslot->userspace_addr);
		hva_end = min(end, memslot->userspace_addr +
					(memslot->npages << PAGE_SHIFT));
		if (hva_start >= hva_end)
			continue;
		/*
		 * {gfn(page) | page intersects with [hva_start, hva_end)} =
		 * {gfn, gfn+1, ..., gfn_end-1}.
		 */
		gfn = hva_to_gfn_memslot(hva_start, memslot);
		gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
		kvm_for_each_vcpu(i, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
					      gfn_end << PAGE_SHIFT);
	}
}

static int kvm_unmap_hva_pr(struct kvm *kvm, unsigned long hva)
{
	trace_kvm_unmap_hva(hva);

	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);

	return 0;
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
				  unsigned long end)
{
	do_kvm_unmap_hva(kvm, start, end);

	return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
			  unsigned long end)
{
	/* XXX could be more clever ;) */
	return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
	/* XXX could be more clever ;) */
	return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
	/* The page will get remapped properly on its next fault */
	do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

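/*
 * Recompute the MSR the hardware actually uses while the guest runs:
 * keep only the guest-controlled mode bits, force on the bits we need
 * to run the guest in problem state with translation enabled, and pass
 * through whichever FP/VEC/VSX bits the guest currently owns.
 */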
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
	ulong guest_msr = kvmppc_get_msr(vcpu);
	ulong smsr = guest_msr;

	/* Guest MSR values */
	smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
	/* Process MSR values */
	smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
	/* External providers the guest reserved */
	smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
	/* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
	smsr |= MSR_ISF | MSR_HV;
#endif
	vcpu->arch.shadow_msr = smsr;
}

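/*
 * Update the guest MSR and react to its side effects: block on MSR_POW,
 * apply or undo the split real mode hack, remap segments when the
 * translation mode changed, and preload the FPU if it is now enabled.
 */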
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
	ulong old_msr = kvmppc_get_msr(vcpu);

#ifdef EXIT_DEBUG
	printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

	msr &= to_book3s(vcpu)->msr_mask;
	kvmppc_set_msr_fast(vcpu, msr);
	kvmppc_recalc_shadow_msr(vcpu);

	if (msr & MSR_POW) {
		if (!vcpu->arch.pending_exceptions) {
			kvm_vcpu_block(vcpu);
			clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
			vcpu->stat.halt_wakeup++;

			/* Unset POW bit after we woke up */
			msr &= ~MSR_POW;
			kvmppc_set_msr_fast(vcpu, msr);
		}
	}

	if (kvmppc_is_split_real(vcpu))
		kvmppc_fixup_split_real(vcpu);
	else
		kvmppc_unfixup_split_real(vcpu);

	if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
		   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
		kvmppc_mmu_flush_segments(vcpu);
		kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

		/* Preload magic page segment when in kernel mode */
		if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
			struct kvm_vcpu_arch *a = &vcpu->arch;

			if (msr & MSR_DR)
				kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
			else
				kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
		}
	}

	/*
	 * When switching from 32 to 64-bit, we may have a stale 32-bit
	 * magic page around, so we need to flush it. Typically the 32-bit
	 * magic page will be instantiated when calling into RTAS. Note: We
	 * assume that such a transition only happens while in kernel mode,
	 * ie, we never transition from user 32-bit to kernel 64-bit with
	 * a 32-bit magic page around.
	 */
	if (vcpu->arch.magic_page_pa &&
	    !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
		/* going from RTAS to normal kernel code */
		kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
				     ~0xFFFUL);
	}

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);
}

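/*
 * Set the guest's PVR and derive everything that depends on the CPU the
 * guest believes it is running on: the MMU model (32 vs. 64 bit), HIOR,
 * the MSR mask, and hflag quirks such as 32-byte dcbz and native paired
 * singles.
 */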
void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
	u32 host_pvr;

	vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
	vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
	if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
		kvmppc_mmu_book3s_64_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0xfff00000;
		to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_64;
	} else
#endif
	{
		kvmppc_mmu_book3s_32_init(vcpu);
		if (!to_book3s(vcpu)->hior_explicit)
			to_book3s(vcpu)->hior = 0;
		to_book3s(vcpu)->msr_mask = 0xffffffffULL;
		vcpu->arch.cpu_type = KVM_CPU_3S_32;
	}

	kvmppc_sanity_check(vcpu);

	/* If we are in hypervisor level on 970, we can tell the CPU to
	 * treat DCBZ as a 32-byte store */
	vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
	if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
	    !strcmp(cur_cpu_spec->platform, "ppc970"))
		vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

	/* Cell performs badly if MSR_FEx are set. So let's hope nobody
	   really needs them in a VM on Cell and force disable them. */
	if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
		to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

	/*
	 * If they're asking for POWER6 or later, set the flag
	 * indicating that we can do multiple large page sizes
	 * and 1TB segments.
	 * Also set the flag that indicates that tlbie has the large
	 * page bit in the RB operand instead of the instruction.
	 */
	switch (PVR_VER(pvr)) {
	case PVR_POWER6:
	case PVR_POWER7:
	case PVR_POWER7p:
	case PVR_POWER8:
	case PVR_POWER8E:
	case PVR_POWER8NVL:
		vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
			BOOK3S_HFLAG_NEW_TLBIE;
		break;
	}

#ifdef CONFIG_PPC_BOOK3S_32
	/* 32 bit Book3S always has 32 byte dcbz */
	vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

	/* On some CPUs we can execute paired single operations natively */
	asm ( "mfpvr %0" : "=r"(host_pvr));
	switch (host_pvr) {
	case 0x00080200:	/* lonestar 2.0 */
	case 0x00088202:	/* lonestar 2.2 */
	case 0x70000100:	/* gekko 1.0 */
	case 0x00080100:	/* gekko 2.0 */
	case 0x00083203:	/* gekko 2.3a */
	case 0x00083213:	/* gekko 2.3b */
	case 0x00083204:	/* gekko 2.4 */
	case 0x00083214:	/* gekko 2.4e (8SE) - retail HW2 */
	case 0x00087200:	/* broadway */
		vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
		/* Enable HID2.PSE - in case we need it later */
		mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
	}
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special
 * bit in the HID5 register, which is a hypervisor resource. Thus we can't use
 * it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
	struct page *hpage;
	u64 hpage_offset;
	u32 *page;
	int i;

	hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
	if (is_error_page(hpage))
		return;

	hpage_offset = pte->raddr & ~PAGE_MASK;
	hpage_offset &= ~0xFFFULL;
	hpage_offset /= 4;

	get_page(hpage);
	page = kmap_atomic(hpage);

	/* patch dcbz into reserved instruction, so we trap */
	for (i=hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
		if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
			page[i] &= cpu_to_be32(0xfffffff7);

	kunmap_atomic(page);
	put_page(hpage);
}

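/*
 * Is this guest physical address backed by a memslot (or by the magic
 * page)?  If not, accesses to it have to be treated as MMIO.
 */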
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
	ulong mp_pa = vcpu->arch.magic_page_pa;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF))
		mp_pa = (uint32_t)mp_pa;

	gpa &= ~0xFFFULL;
	if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
		return true;
	}

	return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

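/*
 * Handle a guest instruction or data storage interrupt: translate the
 * effective address through the guest MMU, reflect the fault back to
 * the guest if its own tables do not map it, and otherwise establish
 * the mapping on the host or fall through to MMIO emulation.
 */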
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    ulong eaddr, int vec)
{
	bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
	bool iswrite = false;
	int r = RESUME_GUEST;
	int relocated;
	int page_found = 0;
	struct kvmppc_pte pte = { 0 };
	bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
	bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
	u64 vsid;

	relocated = data ? dr : ir;
	if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
		iswrite = true;

	/* Resolve real address if translation turned on */
	if (relocated) {
		page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
	} else {
		pte.may_execute = true;
		pte.may_read = true;
		pte.may_write = true;
		pte.raddr = eaddr & KVM_PAM;
		pte.eaddr = eaddr;
		pte.vpage = eaddr >> 12;
		pte.page_size = MMU_PAGE_64K;
	}

	switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
	case 0:
		pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
		break;
	case MSR_DR:
		if (!data &&
		    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
		    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
			pte.raddr &= ~SPLIT_HACK_MASK;
		/* fall through */
	case MSR_IR:
		vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

		if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
			pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
		else
			pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
		pte.vpage |= vsid;

		if (vsid == -1)
			page_found = -EINVAL;
		break;
	}

	if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
	   (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
		/*
		 * If we do the dcbz hack, we have to NX on every execution,
		 * so we can patch the executing code. This renders our guest
		 * NX-less.
		 */
		pte.may_execute = !data;
	}

	if (page_found == -ENOENT) {
		/* Page not found in guest PTE entries */
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_set_dsisr(vcpu, vcpu->arch.fault_dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EPERM) {
		/* Storage protection */
		u32 dsisr = vcpu->arch.fault_dsisr;
		u64 ssrr1 = vcpu->arch.shadow_srr1;
		u64 msr = kvmppc_get_msr(vcpu);
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		dsisr = (dsisr & ~DSISR_NOHPTE) | DSISR_PROTFAULT;
		kvmppc_set_dsisr(vcpu, dsisr);
		kvmppc_set_msr_fast(vcpu, msr | (ssrr1 & 0xf8000000ULL));
		kvmppc_book3s_queue_irqprio(vcpu, vec);
	} else if (page_found == -EINVAL) {
		/* Page not found in guest SLB */
		kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
		kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
	} else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
		if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
			/*
			 * There is already a host HPTE there, presumably
			 * a read-only one for a page the guest thinks
			 * is writable, so get rid of it first.
			 */
			kvmppc_mmu_unmap_page(vcpu, &pte);
		}
		/* The guest's PTE is not mapped yet. Map on the host */
		if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
			/* Exit KVM if mapping failed */
			run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
			return RESUME_HOST;
		}
		if (data)
			vcpu->stat.sp_storage++;
		else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			 (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
			kvmppc_patch_dcbz(vcpu, &pte);
	} else {
		/* MMIO */
		vcpu->stat.mmio_exits++;
		vcpu->arch.paddr_accessed = pte.raddr;
		vcpu->arch.vaddr_accessed = pte.eaddr;
		r = kvmppc_emulate_mmio(run, vcpu);
		if ( r == RESUME_HOST_NV )
			r = RESUME_HOST;
	}

	return r;
}

/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
	struct thread_struct *t = &current->thread;

	/*
	 * VSX instructions can access FP and vector registers, so if
	 * we are giving up VSX, make sure we give up FP and VMX as well.
	 */
	if (msr & MSR_VSX)
		msr |= MSR_FP | MSR_VEC;

	msr &= vcpu->arch.guest_owned_ext;
	if (!msr)
		return;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		/*
		 * Note that on CPUs with VSX, giveup_fpu stores
		 * both the traditional FP registers and the added VSX
		 * registers into thread.fp_state.fpr[].
		 */
		if (t->regs->msr & MSR_FP)
			giveup_fpu(current);
		t->fp_save_area = NULL;
	}

#ifdef CONFIG_ALTIVEC
	if (msr & MSR_VEC) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		t->vr_save_area = NULL;
	}
#endif

	vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
	kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
static void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
	if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
		/* Facility not available to the guest, ignore giveup request */
		return;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		vcpu->arch.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, current->thread.tar);
		vcpu->arch.shadow_fscr &= ~FSCR_TAR;
		break;
	}
#endif
}

/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
			     ulong msr)
{
	struct thread_struct *t = &current->thread;

	/* When we have paired singles, we emulate in software */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
		return RESUME_GUEST;

	if (!(kvmppc_get_msr(vcpu) & msr)) {
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		return RESUME_GUEST;
	}

	if (msr == MSR_VSX) {
		/* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
		if (!cpu_has_feature(CPU_FTR_VSX))
#endif
		{
			kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
			return RESUME_GUEST;
		}

		/*
		 * We have to load up all the FP and VMX registers before
		 * we can let the guest use VSX instructions.
		 */
		msr = MSR_FP | MSR_VEC | MSR_VSX;
	}

	/* See if we already own all the ext(s) needed */
	msr &= ~vcpu->arch.guest_owned_ext;
	if (!msr)
		return RESUME_GUEST;

#ifdef DEBUG_EXT
	printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

	if (msr & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		t->fp_save_area = &vcpu->arch.fp;
		preempt_enable();
	}

	if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		t->vr_save_area = &vcpu->arch.vr;
		preempt_enable();
#endif
	}

	t->regs->msr |= msr;
	vcpu->arch.guest_owned_ext |= msr;
	kvmppc_recalc_shadow_msr(vcpu);

	return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
	unsigned long lost_ext;

	lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
	if (!lost_ext)
		return;

	if (lost_ext & MSR_FP) {
		preempt_disable();
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		preempt_enable();
	}
#ifdef CONFIG_ALTIVEC
	if (lost_ext & MSR_VEC) {
		preempt_disable();
		enable_kernel_altivec();
		load_vr_state(&vcpu->arch.vr);
		disable_kernel_altivec();
		preempt_enable();
	}
#endif
	current->thread.regs->msr |= lost_ext;
}

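/*
 * FSCR-controlled facility handling: either inject a facility
 * unavailable interrupt with the right Interrupt Cause field, or grant
 * the guest use of the facility (currently only the TAR is switched
 * directly; everything else goes through instruction emulation).
 */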
#ifdef CONFIG_PPC_BOOK3S_64

static void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
	/* Inject the Interrupt Cause field and trigger a guest interrupt */
	vcpu->arch.fscr &= ~(0xffULL << 56);
	vcpu->arch.fscr |= (fac << 56);
	kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	enum emulation_result er = EMULATE_FAIL;

	if (!(kvmppc_get_msr(vcpu) & MSR_PR))
		er = kvmppc_emulate_instruction(vcpu->run, vcpu);

	if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
		/* Couldn't emulate, trigger interrupt in guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
	}
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
	bool guest_fac_enabled;
	BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

	/*
	 * Not every facility is enabled by FSCR bits, check whether the
	 * guest has this facility enabled at all.
	 */
	switch (fac) {
	case FSCR_TAR_LG:
	case FSCR_EBB_LG:
		guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
		break;
	case FSCR_TM_LG:
		guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
		break;
	default:
		guest_fac_enabled = false;
		break;
	}

	if (!guest_fac_enabled) {
		/* Facility not enabled by the guest */
		kvmppc_trigger_fac_interrupt(vcpu, fac);
		return RESUME_GUEST;
	}

	switch (fac) {
	case FSCR_TAR_LG:
		/* TAR switching isn't lazy in Linux yet */
		current->thread.tar = mfspr(SPRN_TAR);
		mtspr(SPRN_TAR, vcpu->arch.tar);
		vcpu->arch.shadow_fscr |= FSCR_TAR;
		break;
	default:
		kvmppc_emulate_fac(vcpu, fac);
		break;
	}

	return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
	if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
		/* TAR got dropped, drop it in shadow too */
		kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
	}
	vcpu->arch.fscr = fscr;
}
#endif

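/*
 * Userspace single-stepping (KVM_GUESTDBG_SINGLESTEP) is implemented by
 * running the guest with MSR_SE set, so every guest instruction traps
 * into the BOOK3S_INTERRUPT_TRACE handler below.
 */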
static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr | MSR_SE);
	}
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
		u64 msr = kvmppc_get_msr(vcpu);

		kvmppc_set_msr(vcpu, msr & ~MSR_SE);
	}
}

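/*
 * Deal with a program interrupt (or an exit we convert into one):
 * either reflect it straight into the guest or emulate the offending
 * instruction on its behalf.
 */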
static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
				  unsigned int exit_nr)
{
	enum emulation_result er;
	ulong flags;
	u32 last_inst;
	int emul, r;

	/*
	 * shadow_srr1 only contains valid flags if we came here via a program
	 * exception. The other exceptions (emulation assist, FP unavailable,
	 * etc.) do not provide flags in SRR1, so use an illegal-instruction
	 * exception when injecting a program interrupt into the guest.
	 */
	if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
		flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
	else
		flags = SRR1_PROGILL;

	emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
	if (emul != EMULATE_DONE)
		return RESUME_GUEST;

	if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
		pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
			kvmppc_get_pc(vcpu), last_inst);
#endif
		if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
			kvmppc_core_queue_program(vcpu, flags);
			return RESUME_GUEST;
		}
	}

	vcpu->stat.emulated_inst_exits++;
	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_AGAIN:
		r = RESUME_GUEST;
		break;
	case EMULATE_FAIL:
		pr_crit("%s: emulation at %lx failed (%08x)\n",
			__func__, kvmppc_get_pc(vcpu), last_inst);
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		r = RESUME_HOST_NV;
		break;
	case EMULATE_EXIT_USER:
		r = RESUME_HOST_NV;
		break;
	default:
		BUG();
	}

	return r;
}

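/*
 * Central exit handler: called on every trap out of the guest (with
 * interrupts enabled), dispatches on the exit reason and decides
 * whether to resume the guest or return to userspace.
 */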
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
			  unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	/* We get here with MSR.EE=1 */

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit();

	switch (exit_nr) {
	case BOOK3S_INTERRUPT_INST_STORAGE:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		vcpu->stat.pf_instruc++;

		if (kvmppc_is_split_real(vcpu))
			kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/* only care about PTEG not found errors, but leave NX alone */
		if (shadow_srr1 & 0x40000000) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
			vcpu->stat.sp_instruc++;
		} else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
			  (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
			/*
			 * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
			 *     so we can't use the NX bit inside the guest. Let's cross our fingers,
			 *     that no guest that needs the dcbz hack does NX.
			 */
			kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
			r = RESUME_GUEST;
		} else {
			u64 msr = kvmppc_get_msr(vcpu);
			msr |= shadow_srr1 & 0x58000000;
			kvmppc_set_msr_fast(vcpu, msr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_STORAGE:
	{
		ulong dar = kvmppc_get_fault_dar(vcpu);
		u32 fault_dsisr = vcpu->arch.fault_dsisr;
		vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
		/* We set segments as unused segments when invalidating them. So
		 * treat the respective fault as segment fault. */
		{
			struct kvmppc_book3s_shadow_vcpu *svcpu;
			u32 sr;

			svcpu = svcpu_get(vcpu);
			sr = svcpu->sr[dar >> SID_SHIFT];
			svcpu_put(svcpu);
			if (sr == SR_INVALID) {
				kvmppc_mmu_map_segment(vcpu, dar);
				r = RESUME_GUEST;
				break;
			}
		}
#endif

		/*
		 * We need to handle missing shadow PTEs, and
		 * protection faults due to us mapping a page read-only
		 * when the guest thinks it is writable.
		 */
		if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
			int idx = srcu_read_lock(&vcpu->kvm->srcu);
			r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
			srcu_read_unlock(&vcpu->kvm->srcu, idx);
		} else {
			kvmppc_set_dar(vcpu, dar);
			kvmppc_set_dsisr(vcpu, fault_dsisr);
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_DATA_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
			kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_DATA_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_INST_SEGMENT:
		if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
			kvmppc_book3s_queue_irqprio(vcpu,
				BOOK3S_INTERRUPT_INST_SEGMENT);
		}
		r = RESUME_GUEST;
		break;
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_DECREMENTER:
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
	case BOOK3S_INTERRUPT_DOORBELL:
	case BOOK3S_INTERRUPT_H_DOORBELL:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
	case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
	case BOOK3S_INTERRUPT_EXTERNAL_HV:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
		break;
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		u32 last_sc;
		int emul;

		/* Get last sc for papr */
		if (vcpu->arch.papr_enabled) {
			/* The sc instruction points SRR0 to the next inst */
			emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
			if (emul != EMULATE_DONE) {
				kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
				r = RESUME_GUEST;
				break;
			}
		}

		if (vcpu->arch.papr_enabled &&
		    (last_sc == 0x44000022) &&
		    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
			/* SC 1 papr hypercalls */
			ulong cmd = kvmppc_get_gpr(vcpu, 3);
			int i;

#ifdef CONFIG_PPC_BOOK3S_64
			if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
				r = RESUME_GUEST;
				break;
			}
#endif

			run->papr_hcall.nr = cmd;
			for (i = 0; i < 9; ++i) {
				ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
				run->papr_hcall.args[i] = gpr;
			}
			run->exit_reason = KVM_EXIT_PAPR_HCALL;
			vcpu->arch.hcall_needed = 1;
			r = RESUME_HOST;
		} else if (vcpu->arch.osi_enabled &&
		    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
		    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
			/* MOL hypercalls */
			u64 *gprs = run->osi.gprs;
			int i;

			run->exit_reason = KVM_EXIT_OSI;
			for (i = 0; i < 32; i++)
				gprs[i] = kvmppc_get_gpr(vcpu, i);
			vcpu->arch.osi_needed = 1;
			r = RESUME_HOST_NV;
		} else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			vcpu->stat.syscall_exits++;
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	}
	case BOOK3S_INTERRUPT_FP_UNAVAIL:
	case BOOK3S_INTERRUPT_ALTIVEC:
	case BOOK3S_INTERRUPT_VSX:
	{
		int ext_msr = 0;
		int emul;
		u32 last_inst;

		if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
			/* Do paired single instruction emulation */
			emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
						    &last_inst);
			if (emul == EMULATE_DONE)
				r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
			else
				r = RESUME_GUEST;

			break;
		}

		/* Enable external provider */
		switch (exit_nr) {
		case BOOK3S_INTERRUPT_FP_UNAVAIL:
			ext_msr = MSR_FP;
			break;

		case BOOK3S_INTERRUPT_ALTIVEC:
			ext_msr = MSR_VEC;
			break;

		case BOOK3S_INTERRUPT_VSX:
			ext_msr = MSR_VSX;
			break;
		}

		r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
		break;
	}
	case BOOK3S_INTERRUPT_ALIGNMENT:
	{
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
		kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		r = RESUME_GUEST;
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

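/*
 * KVM_GET_SREGS: export the guest's segment state, either the SLB (on
 * 64-bit capable vcpus) or the segment registers and BATs (on 32-bit).
 */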
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301293static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
1294 struct kvm_sregs *sregs)
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001295{
1296 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1297 int i;
1298
1299 sregs->pvr = vcpu->arch.pvr;
1300
1301 sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
1302 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1303 for (i = 0; i < 64; i++) {
1304 sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
1305 sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
1306 }
1307 } else {
1308 for (i = 0; i < 16; i++)
Alexander Graf5deb8e72014-04-24 13:46:24 +02001309 sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001310
1311 for (i = 0; i < 8; i++) {
1312 sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
1313 sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
1314 }
1315 }
1316
1317 return 0;
1318}
1319
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301320static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
1321 struct kvm_sregs *sregs)
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001322{
1323 struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
1324 int i;
1325
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301326 kvmppc_set_pvr_pr(vcpu, sregs->pvr);
Paul Mackerrasf05ed4d2011-06-29 00:17:58 +00001327
1328 vcpu3s->sdr1 = sregs->u.s.sdr1;
1329 if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
1330 for (i = 0; i < 64; i++) {
1331 vcpu->arch.mmu.slbmte(vcpu, sregs->u.s.ppc64.slb[i].slbv,
1332 sregs->u.s.ppc64.slb[i].slbe);
1333 }
1334 } else {
1335 for (i = 0; i < 16; i++) {
1336 vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
1337 }
1338 for (i = 0; i < 8; i++) {
1339 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
1340 (u32)sregs->u.s.ppc32.ibat[i]);
1341 kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
1342 (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
1343 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
1344 (u32)sregs->u.s.ppc32.dbat[i]);
1345 kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
1346 (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
1347 }
1348 }
1349
1350 /* Flush the MMU after messing with the segments */
1351 kvmppc_mmu_pte_flush(vcpu, 0, 0);
1352
1353 return 0;
1354}
1355
static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_VTB:
		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

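/*
 * Illustrative userspace sketch, not part of this file: the two ONE_REG
 * accessors above are reached via the KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * vcpu ioctls, with the register IDs from the uapi header. Below, a
 * hypothetical helper reads the software-breakpoint instruction (a
 * 32-bit register) and relocates the interrupt vectors by writing HIOR
 * (a 64-bit register). vcpu_fd is assumed to be an open vcpu descriptor.
 */
#if 0	/* userspace example; builds against <linux/kvm.h> */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int move_vectors(int vcpu_fd, __u64 hior)
{
	__u32 brk_insn;
	__u64 hior_val = hior;
	struct kvm_one_reg reg;

	reg.id = KVM_REG_PPC_DEBUG_INST;	/* 32-bit pseudo register */
	reg.addr = (__u64)(unsigned long)&brk_insn;
	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
		return -1;

	reg.id = KVM_REG_PPC_HIOR;		/* 64-bit register */
	reg.addr = (__u64)(unsigned long)&hior_val;
	return ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
}
#endif
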
static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on a sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

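/*
 * Illustrative userspace sketch, not part of this file: the constructor
 * above runs when userspace issues KVM_CREATE_VCPU on a VM descriptor;
 * the ioctl argument becomes the "unsigned int id" parameter. The helper
 * name create_vcpu and the vm_fd parameter are our assumptions.
 */
#if 0	/* userspace example; builds against <linux/kvm.h> */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int create_vcpu(int vm_fd, unsigned long id)
{
	/* Returns a new vcpu file descriptor, or -1 on error. */
	return ioctl(vm_fd, KVM_CREATE_VCPU, id);
}
#endif
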
static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_setup_debug(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU, Altivec and VSX state */
	giveup_all(current);

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvmppc_clear_debug(vcpu);

	/*
	 * No need for guest_exit. It's done in handle_exit.
	 * We also get here with interrupts enabled.
	 */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

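/*
 * Illustrative userspace sketch, not part of this file:
 * kvmppc_vcpu_run_pr() sits underneath the KVM_RUN vcpu ioctl. A minimal
 * caller mmap()s the shared kvm_run area and loops on KVM_RUN, dispatching
 * on exit_reason. kvm_fd and vcpu_fd are assumed to be open descriptors
 * for /dev/kvm and a vcpu; error handling is deliberately minimal.
 */
#if 0	/* userspace example; builds against <linux/kvm.h> */
#include <linux/kvm.h>
#include <sys/ioctl.h>
#include <sys/mman.h>

static int run_loop(int kvm_fd, int vcpu_fd)
{
	int size = ioctl(kvm_fd, KVM_GET_VCPU_MMAP_SIZE, 0);
	struct kvm_run *run;

	if (size < 0)
		return -1;
	run = mmap(NULL, size, PROT_READ | PROT_WRITE, MAP_SHARED,
		   vcpu_fd, 0);
	if (run == MAP_FAILED)
		return -1;

	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			return -1;
		if (run->exit_reason == KVM_EXIT_INTERNAL_ERROR)
			return -1;	/* e.g. the !vcpu->arch.sane path above */
		/* dispatch KVM_EXIT_MMIO, KVM_EXIT_PAPR_HCALL, ... here */
	}
}
#endif
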
/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

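/*
 * Illustrative userspace sketch, not part of this file: the handler
 * above implements the KVM_GET_DIRTY_LOG VM ioctl for PR KVM. The slot
 * must have been registered with KVM_MEM_LOG_DIRTY_PAGES, and the caller
 * supplies a bitmap buffer sized at one bit per page, rounded up. The
 * helper name fetch_dirty_log and the vm_fd parameter are assumptions.
 */
#if 0	/* userspace example; builds against <linux/kvm.h> */
#include <linux/kvm.h>
#include <sys/ioctl.h>

static int fetch_dirty_log(int vm_fd, __u32 slot, void *bitmap)
{
	struct kvm_dirty_log log = {
		.slot = slot,
		.dirty_bitmap = bitmap,	/* filled in, then cleared in-kernel */
	};

	return ioctl(vm_fd, KVM_GET_DIRTY_LOG, &log);
}
#endif
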
static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test. Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}
#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

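/*
 * Illustrative userspace sketch, not part of this file: the function
 * above fills in the answer to the KVM_PPC_GET_SMMU_INFO VM ioctl. The
 * loop below assumes unused sps[] entries are left zeroed (the generic
 * ioctl path zeroes the struct before calling in), so a zero page_shift
 * terminates the list. dump_smmu_info and vm_fd are our names.
 */
#if 0	/* userspace example; builds against <linux/kvm.h> */
#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

static void dump_smmu_info(int vm_fd)
{
	struct kvm_ppc_smmu_info info;
	int i;

	if (ioctl(vm_fd, KVM_PPC_GET_SMMU_INFO, &info) < 0)
		return;
	printf("slb_size %u flags %#llx\n", info.slb_size,
	       (unsigned long long)info.flags);
	for (i = 0; i < KVM_PPC_PAGE_SIZES_MAX_SZ; i++) {
		if (!info.sps[i].page_shift)
			break;
		printf("base page shift %u, slb_enc %#x\n",
		       info.sps[i].page_shift, info.sps[i].slb_enc);
	}
}
#endif
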
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pseries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pseries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/*
	 * Disable KVM for Power9 until the required bits are merged.
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300))
		return -EIO;
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load = kvmppc_core_vcpu_load_pr,
	.vcpu_put = kvmppc_core_vcpu_put_pr,
	.set_msr = kvmppc_set_msr_pr,
	.vcpu_run = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva = kvm_unmap_hva_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
#endif
};

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif