/*
 * Copyright (C) 2009. SUSE Linux Products GmbH. All rights reserved.
 *
 * Authors:
 *    Alexander Graf <agraf@suse.de>
 *    Kevin Wolf <mail@kevin-wolf.de>
 *    Paul Mackerras <paulus@samba.org>
 *
 * Description:
 * Functions relating to running KVM on Book 3S processors where
 * we don't have access to hypervisor mode, and we run the guest
 * in problem state (user mode).
 *
 * This file is derived from arch/powerpc/kvm/44x.c,
 * by Hollis Blanchard <hollisb@us.ibm.com>.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 */

#include <linux/kvm_host.h>
#include <linux/export.h>
#include <linux/err.h>
#include <linux/slab.h>

#include <asm/reg.h>
#include <asm/cputable.h>
#include <asm/cacheflush.h>
#include <asm/tlbflush.h>
#include <linux/uaccess.h>
#include <asm/io.h>
#include <asm/kvm_ppc.h>
#include <asm/kvm_book3s.h>
#include <asm/mmu_context.h>
#include <asm/switch_to.h>
#include <asm/firmware.h>
#include <asm/setup.h>
#include <linux/gfp.h>
#include <linux/sched.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/module.h>
#include <linux/miscdevice.h>
#include <asm/asm-prototypes.h>
#include <asm/tm.h>

#include "book3s.h"

#define CREATE_TRACE_POINTS
#include "trace_pr.h"

/* #define EXIT_DEBUG */
/* #define DEBUG_EXT */

static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr);
#ifdef CONFIG_PPC_BOOK3S_64
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac);
#endif

/* Some compatibility defines */
#ifdef CONFIG_PPC_BOOK3S_32
#define MSR_USER32 MSR_USER
#define MSR_USER64 MSR_USER
#define HW_PAGE_SIZE PAGE_SIZE
#define HPTE_R_M _PAGE_COHERENT
#endif

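/*
 * "Split real mode" is the state where the guest runs with MSR[IR] = 0
 * but MSR[DR] = 1: instruction fetches are real-mode while data accesses
 * are translated. kvmppc_fixup_split_real() relocates the guest PC into
 * the SPLIT_HACK_OFFS window so those real-mode fetches can be serviced
 * (illustrative sketch: pc |= SPLIT_HACK_OFFS on entry, and
 * kvmppc_unfixup_split_real() strips the offset again on the way out).
 */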
static bool kvmppc_is_split_real(struct kvm_vcpu *vcpu)
{
        ulong msr = kvmppc_get_msr(vcpu);
        return (msr & (MSR_IR|MSR_DR)) == MSR_DR;
}

static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
        ulong msr = kvmppc_get_msr(vcpu);
        ulong pc = kvmppc_get_pc(vcpu);

        /* Do nothing unless we are in DR-only split real mode */
        if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
                return;

        /* Do nothing if we have already fixed up the guest */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
                return;

        /* Do nothing unless the code is in the fixupable address space */
        if (pc & SPLIT_HACK_MASK)
                return;

        vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
        kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}

void kvmppc_unfixup_split_real(struct kvm_vcpu *vcpu);

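/*
 * vcpu_load/vcpu_put hooks. These run whenever a vcpu is scheduled onto
 * or off a host CPU: load refreshes the per-CPU shadow vcpu (shadow SLB)
 * and disables Alternate Interrupt Location, since PR KVM relies on
 * taking interrupts in real mode; put syncs state back out of the shadow
 * vcpu, hands FP/VEC/VSX and TAR back to the host and re-enables AIL.
 */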
static void kvmppc_core_vcpu_load_pr(struct kvm_vcpu *vcpu, int cpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        memcpy(svcpu->slb, to_book3s(vcpu)->slb_shadow, sizeof(svcpu->slb));
        svcpu->slb_max = to_book3s(vcpu)->slb_shadow_max;
        svcpu->in_use = 0;
        svcpu_put(svcpu);
#endif

        /* Disable AIL if supported */
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) & ~LPCR_AIL);

        vcpu->cpu = smp_processor_id();
#ifdef CONFIG_PPC_BOOK3S_32
        current->thread.kvm_shadow_vcpu = vcpu->arch.shadow_vcpu;
#endif

        if (kvmppc_is_split_real(vcpu))
                kvmppc_fixup_split_real(vcpu);

        kvmppc_restore_tm_pr(vcpu);
}

static void kvmppc_core_vcpu_put_pr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_BOOK3S_64
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
        if (svcpu->in_use) {
                kvmppc_copy_from_svcpu(vcpu);
        }
        memcpy(to_book3s(vcpu)->slb_shadow, svcpu->slb, sizeof(svcpu->slb));
        to_book3s(vcpu)->slb_shadow_max = svcpu->slb_max;
        svcpu_put(svcpu);
#endif

        if (kvmppc_is_split_real(vcpu))
                kvmppc_unfixup_split_real(vcpu);

        kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);
        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        kvmppc_save_tm_pr(vcpu);

        /* Enable AIL if supported */
        if (cpu_has_feature(CPU_FTR_HVMODE) &&
            cpu_has_feature(CPU_FTR_ARCH_207S))
                mtspr(SPRN_LPCR, mfspr(SPRN_LPCR) | LPCR_AIL_3);

        vcpu->cpu = -1;
}

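/*
 * The shadow vcpu is the small structure the real-mode assembly entry
 * and exit code operates on. Only what that code actually touches is
 * mirrored: the volatile GPRs (r0-r13), CR, XER, CTR, LR, PC and the
 * fault state; everything else stays in the regular vcpu struct.
 */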
/* Copy data needed by real-mode code from vcpu to shadow vcpu */
void kvmppc_copy_to_svcpu(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);

        svcpu->gpr[0] = vcpu->arch.regs.gpr[0];
        svcpu->gpr[1] = vcpu->arch.regs.gpr[1];
        svcpu->gpr[2] = vcpu->arch.regs.gpr[2];
        svcpu->gpr[3] = vcpu->arch.regs.gpr[3];
        svcpu->gpr[4] = vcpu->arch.regs.gpr[4];
        svcpu->gpr[5] = vcpu->arch.regs.gpr[5];
        svcpu->gpr[6] = vcpu->arch.regs.gpr[6];
        svcpu->gpr[7] = vcpu->arch.regs.gpr[7];
        svcpu->gpr[8] = vcpu->arch.regs.gpr[8];
        svcpu->gpr[9] = vcpu->arch.regs.gpr[9];
        svcpu->gpr[10] = vcpu->arch.regs.gpr[10];
        svcpu->gpr[11] = vcpu->arch.regs.gpr[11];
        svcpu->gpr[12] = vcpu->arch.regs.gpr[12];
        svcpu->gpr[13] = vcpu->arch.regs.gpr[13];
        svcpu->cr = vcpu->arch.cr;
        svcpu->xer = vcpu->arch.regs.xer;
        svcpu->ctr = vcpu->arch.regs.ctr;
        svcpu->lr = vcpu->arch.regs.link;
        svcpu->pc = vcpu->arch.regs.nip;
#ifdef CONFIG_PPC_BOOK3S_64
        svcpu->shadow_fscr = vcpu->arch.shadow_fscr;
#endif
        /*
         * Now also save the current time base value. We use this
         * to find the guest purr and spurr value.
         */
        vcpu->arch.entry_tb = get_tb();
        vcpu->arch.entry_vtb = get_vtb();
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                vcpu->arch.entry_ic = mfspr(SPRN_IC);
        svcpu->in_use = true;

        svcpu_put(svcpu);
}

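/*
 * The shadow MSR is what the CPU really runs with while the guest
 * executes: the guest's arithmetic/mode bits (FE0/FE1/SF/SE/BE/LE and,
 * with TM, the transactional-state bits), the bits the host must keep
 * set (ME/RI/IR/DR/PR/EE), plus whichever FP/VEC/VSX bits the guest
 * currently owns.
 */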
static void kvmppc_recalc_shadow_msr(struct kvm_vcpu *vcpu)
{
        ulong guest_msr = kvmppc_get_msr(vcpu);
        ulong smsr = guest_msr;

        /* Guest MSR values */
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE |
                MSR_TM | MSR_TS_MASK;
#else
        smsr &= MSR_FE0 | MSR_FE1 | MSR_SF | MSR_SE | MSR_BE | MSR_LE;
#endif
        /* Process MSR values */
        smsr |= MSR_ME | MSR_RI | MSR_IR | MSR_DR | MSR_PR | MSR_EE;
        /* External providers the guest reserved */
        smsr |= (guest_msr & vcpu->arch.guest_owned_ext);
        /* 64-bit Process MSR values */
#ifdef CONFIG_PPC_BOOK3S_64
        smsr |= MSR_ISF | MSR_HV;
#endif
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * In guest privileged state we want all TM transactions to fail,
         * so clear the MSR TM bit so that every tbegin. traps into the
         * host.
         */
        if (!(guest_msr & MSR_PR))
                smsr &= ~MSR_TM;
#endif
        vcpu->arch.shadow_msr = smsr;
}

/* Copy data touched by real-mode code from shadow vcpu back to vcpu */
void kvmppc_copy_from_svcpu(struct kvm_vcpu *vcpu)
{
        struct kvmppc_book3s_shadow_vcpu *svcpu = svcpu_get(vcpu);
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        ulong old_msr;
#endif

        /*
         * Maybe we were already preempted and synced the svcpu from
         * our preempt notifiers. Don't bother touching this svcpu then.
         */
        if (!svcpu->in_use)
                goto out;

        vcpu->arch.regs.gpr[0] = svcpu->gpr[0];
        vcpu->arch.regs.gpr[1] = svcpu->gpr[1];
        vcpu->arch.regs.gpr[2] = svcpu->gpr[2];
        vcpu->arch.regs.gpr[3] = svcpu->gpr[3];
        vcpu->arch.regs.gpr[4] = svcpu->gpr[4];
        vcpu->arch.regs.gpr[5] = svcpu->gpr[5];
        vcpu->arch.regs.gpr[6] = svcpu->gpr[6];
        vcpu->arch.regs.gpr[7] = svcpu->gpr[7];
        vcpu->arch.regs.gpr[8] = svcpu->gpr[8];
        vcpu->arch.regs.gpr[9] = svcpu->gpr[9];
        vcpu->arch.regs.gpr[10] = svcpu->gpr[10];
        vcpu->arch.regs.gpr[11] = svcpu->gpr[11];
        vcpu->arch.regs.gpr[12] = svcpu->gpr[12];
        vcpu->arch.regs.gpr[13] = svcpu->gpr[13];
        vcpu->arch.cr = svcpu->cr;
        vcpu->arch.regs.xer = svcpu->xer;
        vcpu->arch.regs.ctr = svcpu->ctr;
        vcpu->arch.regs.link = svcpu->lr;
        vcpu->arch.regs.nip = svcpu->pc;
        vcpu->arch.shadow_srr1 = svcpu->shadow_srr1;
        vcpu->arch.fault_dar = svcpu->fault_dar;
        vcpu->arch.fault_dsisr = svcpu->fault_dsisr;
        vcpu->arch.last_inst = svcpu->last_inst;
#ifdef CONFIG_PPC_BOOK3S_64
        vcpu->arch.shadow_fscr = svcpu->shadow_fscr;
#endif
        /*
         * Update purr and spurr using time base on exit.
         */
        vcpu->arch.purr += get_tb() - vcpu->arch.entry_tb;
        vcpu->arch.spurr += get_tb() - vcpu->arch.entry_tb;
        to_book3s(vcpu)->vtb += get_vtb() - vcpu->arch.entry_vtb;
        if (cpu_has_feature(CPU_FTR_ARCH_207S))
                vcpu->arch.ic += mfspr(SPRN_IC) - vcpu->arch.entry_ic;

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * Unlike other MSR bits, the MSR[TS] bits can be changed in the
         * guest without notifying the host: unprivileged instructions
         * such as "tbegin"/"tend"/"tresume"/"tsuspend" modify them in a
         * PR KVM guest.
         *
         * It is necessary to sync here to calculate a correct shadow_msr.
         *
         * A privileged guest's tbegin always fails at present, so we only
         * have to take care of problem-state guests.
         */
        old_msr = kvmppc_get_msr(vcpu);
        if (unlikely((old_msr & MSR_PR) &&
                     (vcpu->arch.shadow_srr1 & (MSR_TS_MASK)) !=
                     (old_msr & (MSR_TS_MASK)))) {
                old_msr &= ~(MSR_TS_MASK);
                old_msr |= (vcpu->arch.shadow_srr1 & (MSR_TS_MASK));
                kvmppc_set_msr_fast(vcpu, old_msr);
                kvmppc_recalc_shadow_msr(vcpu);
        }
#endif

        svcpu->in_use = false;

out:
        svcpu_put(svcpu);
}

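/*
 * TM support for PR guests: while no transaction is active only the TM
 * SPRs (TFHAR, TEXASR, TFIAR) need to be swapped; with an active
 * transaction, the _kvmppc_save_tm_pr()/_kvmppc_restore_tm_pr() helpers
 * also save or restore the checkpointed register state.
 */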
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
void kvmppc_save_tm_sprs(struct kvm_vcpu *vcpu)
{
        tm_enable();
        vcpu->arch.tfhar = mfspr(SPRN_TFHAR);
        vcpu->arch.texasr = mfspr(SPRN_TEXASR);
        vcpu->arch.tfiar = mfspr(SPRN_TFIAR);
        tm_disable();
}

void kvmppc_restore_tm_sprs(struct kvm_vcpu *vcpu)
{
        tm_enable();
        mtspr(SPRN_TFHAR, vcpu->arch.tfhar);
        mtspr(SPRN_TEXASR, vcpu->arch.texasr);
        mtspr(SPRN_TFIAR, vcpu->arch.tfiar);
        tm_disable();
}

/*
 * Load up the math (FP/VEC/VSX) facilities that are enabled in the
 * guest MSR (kvmppc_get_msr()) but not yet enabled in hardware.
 */
static void kvmppc_handle_lost_math_exts(struct kvm_vcpu *vcpu)
{
        ulong exit_nr;
        ulong ext_diff = (kvmppc_get_msr(vcpu) & ~vcpu->arch.guest_owned_ext) &
                (MSR_FP | MSR_VEC | MSR_VSX);

        if (!ext_diff)
                return;

        if (ext_diff == MSR_FP)
                exit_nr = BOOK3S_INTERRUPT_FP_UNAVAIL;
        else if (ext_diff == MSR_VEC)
                exit_nr = BOOK3S_INTERRUPT_ALTIVEC;
        else
                exit_nr = BOOK3S_INTERRUPT_VSX;

        kvmppc_handle_ext(vcpu, exit_nr, ext_diff);
}

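/*
 * kvmppc_save_tm_pr()/kvmppc_restore_tm_pr() run on every vcpu put/load
 * (see kvmppc_core_vcpu_put_pr() above). The TAR/VSX giveups in the save
 * path flush that facility state into the vcpu struct before the
 * transactional state is saved.
 */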
void kvmppc_save_tm_pr(struct kvm_vcpu *vcpu)
{
        if (!(MSR_TM_ACTIVE(kvmppc_get_msr(vcpu)))) {
                kvmppc_save_tm_sprs(vcpu);
                return;
        }

        kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        kvmppc_giveup_ext(vcpu, MSR_VSX);

        preempt_disable();
        _kvmppc_save_tm_pr(vcpu, mfmsr());
        preempt_enable();
}

void kvmppc_restore_tm_pr(struct kvm_vcpu *vcpu)
{
        if (!MSR_TM_ACTIVE(kvmppc_get_msr(vcpu))) {
                kvmppc_restore_tm_sprs(vcpu);
                if (kvmppc_get_msr(vcpu) & MSR_TM) {
                        kvmppc_handle_lost_math_exts(vcpu);
                        if (vcpu->arch.fscr & FSCR_TAR)
                                kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
                }
                return;
        }

        preempt_disable();
        _kvmppc_restore_tm_pr(vcpu, kvmppc_get_msr(vcpu));
        preempt_enable();

        if (kvmppc_get_msr(vcpu) & MSR_TM) {
                kvmppc_handle_lost_math_exts(vcpu);
                if (vcpu->arch.fscr & FSCR_TAR)
                        kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
        }
}
#endif

static int kvmppc_core_check_requests_pr(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        /* We misuse TLB_FLUSH to indicate that we want to clear
           all shadow cache entries */
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_mmu_pte_flush(vcpu, 0, 0);

        return r;
}

/************* MMU Notifiers *************/
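/*
 * PR KVM reacts to host MMU notifier events by simply flushing the
 * shadow page tables for the affected range and letting subsequent
 * faults rebuild the mappings; the aging callbacks conservatively
 * report "not referenced".
 */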
static void do_kvm_unmap_hva(struct kvm *kvm, unsigned long start,
                             unsigned long end)
{
        long i;
        struct kvm_vcpu *vcpu;
        struct kvm_memslots *slots;
        struct kvm_memory_slot *memslot;

        slots = kvm_memslots(kvm);
        kvm_for_each_memslot(memslot, slots) {
                unsigned long hva_start, hva_end;
                gfn_t gfn, gfn_end;

                hva_start = max(start, memslot->userspace_addr);
                hva_end = min(end, memslot->userspace_addr +
                                        (memslot->npages << PAGE_SHIFT));
                if (hva_start >= hva_end)
                        continue;
                /*
                 * {gfn(page) | page intersects with [hva_start, hva_end)} =
                 * {gfn, gfn+1, ..., gfn_end-1}.
                 */
                gfn = hva_to_gfn_memslot(hva_start, memslot);
                gfn_end = hva_to_gfn_memslot(hva_end + PAGE_SIZE - 1, memslot);
                kvm_for_each_vcpu(i, vcpu, kvm)
                        kvmppc_mmu_pte_pflush(vcpu, gfn << PAGE_SHIFT,
                                              gfn_end << PAGE_SHIFT);
        }
}

static int kvm_unmap_hva_range_pr(struct kvm *kvm, unsigned long start,
                                  unsigned long end)
{
        do_kvm_unmap_hva(kvm, start, end);

        return 0;
}

static int kvm_age_hva_pr(struct kvm *kvm, unsigned long start,
                          unsigned long end)
{
        /* XXX could be more clever ;) */
        return 0;
}

static int kvm_test_age_hva_pr(struct kvm *kvm, unsigned long hva)
{
        /* XXX could be more clever ;) */
        return 0;
}

static void kvm_set_spte_hva_pr(struct kvm *kvm, unsigned long hva, pte_t pte)
{
        /* The page will get remapped properly on its next fault */
        do_kvm_unmap_hva(kvm, hva, hva + PAGE_SIZE);
}

/*****************************************/

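/*
 * Most guest MSR updates funnel through kvmppc_set_msr_pr(). Besides
 * recomputing the shadow MSR it has several side effects: blocking the
 * vcpu on MSR[POW], remapping segments when the PR/IR/DR bits change,
 * flushing a stale 32-bit magic page on 32->64-bit transitions, and
 * preloading the FPU or lost TM math state where enabled.
 */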
static void kvmppc_set_msr_pr(struct kvm_vcpu *vcpu, u64 msr)
{
        ulong old_msr;

#ifdef EXIT_DEBUG
        printk(KERN_INFO "KVM: Set MSR to 0x%llx\n", msr);
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * We should never target the guest MSR at TS=10 && PR=0, since
         * we always fail transactions in guest privileged state.
         */
        if (!(msr & MSR_PR) && MSR_TM_TRANSACTIONAL(msr))
                kvmppc_emulate_tabort(vcpu,
                        TM_CAUSE_KVM_FAC_UNAV | TM_CAUSE_PERSISTENT);
#endif

        old_msr = kvmppc_get_msr(vcpu);
        msr &= to_book3s(vcpu)->msr_mask;
        kvmppc_set_msr_fast(vcpu, msr);
        kvmppc_recalc_shadow_msr(vcpu);

        if (msr & MSR_POW) {
                if (!vcpu->arch.pending_exceptions) {
                        kvm_vcpu_block(vcpu);
                        kvm_clear_request(KVM_REQ_UNHALT, vcpu);
                        vcpu->stat.halt_wakeup++;

                        /* Unset POW bit after we woke up */
                        msr &= ~MSR_POW;
                        kvmppc_set_msr_fast(vcpu, msr);
                }
        }

        if (kvmppc_is_split_real(vcpu))
                kvmppc_fixup_split_real(vcpu);
        else
                kvmppc_unfixup_split_real(vcpu);

        if ((kvmppc_get_msr(vcpu) & (MSR_PR|MSR_IR|MSR_DR)) !=
                   (old_msr & (MSR_PR|MSR_IR|MSR_DR))) {
                kvmppc_mmu_flush_segments(vcpu);
                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));

                /* Preload magic page segment when in kernel mode */
                if (!(msr & MSR_PR) && vcpu->arch.magic_page_pa) {
                        struct kvm_vcpu_arch *a = &vcpu->arch;

                        if (msr & MSR_DR)
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_ea);
                        else
                                kvmppc_mmu_map_segment(vcpu, a->magic_page_pa);
                }
        }

        /*
         * When switching from 32 to 64-bit, we may have a stale 32-bit
         * magic page around, which we need to flush. Typically the 32-bit
         * magic page will be instantiated when calling into RTAS. Note: We
         * assume that such a transition only happens while in kernel mode,
         * ie, we never transition from user 32-bit to kernel 64-bit with
         * a 32-bit magic page around.
         */
        if (vcpu->arch.magic_page_pa &&
            !(old_msr & MSR_PR) && !(old_msr & MSR_SF) && (msr & MSR_SF)) {
                /* going from RTAS to normal kernel code */
                kvmppc_mmu_pte_flush(vcpu, (uint32_t)vcpu->arch.magic_page_pa,
                                     ~0xFFFUL);
        }

        /* Preload FPU if it's enabled */
        if (kvmppc_get_msr(vcpu) & MSR_FP)
                kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        if (kvmppc_get_msr(vcpu) & MSR_TM)
                kvmppc_handle_lost_math_exts(vcpu);
#endif
}

void kvmppc_set_pvr_pr(struct kvm_vcpu *vcpu, u32 pvr)
{
        u32 host_pvr;

        vcpu->arch.hflags &= ~BOOK3S_HFLAG_SLB;
        vcpu->arch.pvr = pvr;
#ifdef CONFIG_PPC_BOOK3S_64
        if ((pvr >= 0x330000) && (pvr < 0x70330000)) {
                kvmppc_mmu_book3s_64_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0xfff00000;
                to_book3s(vcpu)->msr_mask = 0xffffffffffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_64;
        } else
#endif
        {
                kvmppc_mmu_book3s_32_init(vcpu);
                if (!to_book3s(vcpu)->hior_explicit)
                        to_book3s(vcpu)->hior = 0;
                to_book3s(vcpu)->msr_mask = 0xffffffffULL;
                vcpu->arch.cpu_type = KVM_CPU_3S_32;
        }

        kvmppc_sanity_check(vcpu);

        /* If we are in hypervisor level on 970, we can tell the CPU to
         * treat DCBZ as a 32-byte store */
        vcpu->arch.hflags &= ~BOOK3S_HFLAG_DCBZ32;
        if (vcpu->arch.mmu.is_dcbz32(vcpu) && (mfmsr() & MSR_HV) &&
            !strcmp(cur_cpu_spec->platform, "ppc970"))
                vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;

        /* Cell performs badly if MSR_FEx are set. So let's hope nobody
           really needs them in a VM on Cell and force disable them. */
        if (!strcmp(cur_cpu_spec->platform, "ppc-cell-be"))
                to_book3s(vcpu)->msr_mask &= ~(MSR_FE0 | MSR_FE1);

        /*
         * If they're asking for POWER6 or later, set the flag
         * indicating that we can do multiple large page sizes
         * and 1TB segments.
         * Also set the flag that indicates that tlbie has the large
         * page bit in the RB operand instead of the instruction.
         */
        switch (PVR_VER(pvr)) {
        case PVR_POWER6:
        case PVR_POWER7:
        case PVR_POWER7p:
        case PVR_POWER8:
        case PVR_POWER8E:
        case PVR_POWER8NVL:
                vcpu->arch.hflags |= BOOK3S_HFLAG_MULTI_PGSIZE |
                        BOOK3S_HFLAG_NEW_TLBIE;
                break;
        }

#ifdef CONFIG_PPC_BOOK3S_32
        /* 32 bit Book3S always has 32 byte dcbz */
        vcpu->arch.hflags |= BOOK3S_HFLAG_DCBZ32;
#endif

        /* On some CPUs we can execute paired single operations natively */
        asm ( "mfpvr %0" : "=r"(host_pvr));
        switch (host_pvr) {
        case 0x00080200:        /* lonestar 2.0 */
        case 0x00088202:        /* lonestar 2.2 */
        case 0x70000100:        /* gekko 1.0 */
        case 0x00080100:        /* gekko 2.0 */
        case 0x00083203:        /* gekko 2.3a */
        case 0x00083213:        /* gekko 2.3b */
        case 0x00083204:        /* gekko 2.4 */
        case 0x00083214:        /* gekko 2.4e (8SE) - retail HW2 */
        case 0x00087200:        /* broadway */
                vcpu->arch.hflags |= BOOK3S_HFLAG_NATIVE_PS;
                /* Enable HID2.PSE - in case we need it later */
                mtspr(SPRN_HID2_GEKKO, mfspr(SPRN_HID2_GEKKO) | (1 << 29));
        }
}

/* Book3s_32 CPUs always have a 32-byte cache line size, which Linux assumes.
 * To make Book3s_32 Linux work on Book3s_64, we have to make sure we trap dcbz
 * to emulate a 32-byte dcbz length.
 *
 * The Book3s_64 inventors also realized this case and implemented a special bit
 * in the HID5 register, which is a hypervisor resource. Thus we can't use it.
 *
 * My approach here is to patch the dcbz instruction on executing pages.
 */
static void kvmppc_patch_dcbz(struct kvm_vcpu *vcpu, struct kvmppc_pte *pte)
{
        struct page *hpage;
        u64 hpage_offset;
        u32 *page;
        int i;

        hpage = gfn_to_page(vcpu->kvm, pte->raddr >> PAGE_SHIFT);
        if (is_error_page(hpage))
                return;

        hpage_offset = pte->raddr & ~PAGE_MASK;
        hpage_offset &= ~0xFFFULL;
        hpage_offset /= 4;

        get_page(hpage);
        page = kmap_atomic(hpage);

        /* patch dcbz into reserved instruction, so we trap */
        for (i = hpage_offset; i < hpage_offset + (HW_PAGE_SIZE / 4); i++)
                if ((be32_to_cpu(page[i]) & 0xff0007ff) == INS_DCBZ)
                        page[i] &= cpu_to_be32(0xfffffff7);

        kunmap_atomic(page);
        put_page(hpage);
}

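/*
 * The "magic page" is the shared paravirt page a guest maps via
 * hypercall; it lives outside the memslots, so gpa visibility checks
 * must special-case it as below.
 */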
static bool kvmppc_visible_gpa(struct kvm_vcpu *vcpu, gpa_t gpa)
{
        ulong mp_pa = vcpu->arch.magic_page_pa;

        if (!(kvmppc_get_msr(vcpu) & MSR_SF))
                mp_pa = (uint32_t)mp_pa;

        gpa &= ~0xFFFULL;
        if (unlikely(mp_pa) && unlikely((mp_pa & KVM_PAM) == (gpa & KVM_PAM))) {
                return true;
        }

        return kvm_is_visible_gfn(vcpu->kvm, gpa >> PAGE_SHIFT);
}

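/*
 * Page-fault path: translate the guest effective address through the
 * guest MMU (or fabricate a 1:1 mapping when translation is off), then
 * either reflect the fault into the guest, map the page on the host,
 * or treat the access as MMIO and fall back to emulation.
 */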
int kvmppc_handle_pagefault(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            ulong eaddr, int vec)
{
        bool data = (vec == BOOK3S_INTERRUPT_DATA_STORAGE);
        bool iswrite = false;
        int r = RESUME_GUEST;
        int relocated;
        int page_found = 0;
        struct kvmppc_pte pte = { 0 };
        bool dr = (kvmppc_get_msr(vcpu) & MSR_DR) ? true : false;
        bool ir = (kvmppc_get_msr(vcpu) & MSR_IR) ? true : false;
        u64 vsid;

        relocated = data ? dr : ir;
        if (data && (vcpu->arch.fault_dsisr & DSISR_ISSTORE))
                iswrite = true;

        /* Resolve real address if translation turned on */
        if (relocated) {
                page_found = vcpu->arch.mmu.xlate(vcpu, eaddr, &pte, data, iswrite);
        } else {
                pte.may_execute = true;
                pte.may_read = true;
                pte.may_write = true;
                pte.raddr = eaddr & KVM_PAM;
                pte.eaddr = eaddr;
                pte.vpage = eaddr >> 12;
                pte.page_size = MMU_PAGE_64K;
                pte.wimg = HPTE_R_M;
        }

        switch (kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) {
        case 0:
                pte.vpage |= ((u64)VSID_REAL << (SID_SHIFT - 12));
                break;
        case MSR_DR:
                if (!data &&
                    (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK) &&
                    ((pte.raddr & SPLIT_HACK_MASK) == SPLIT_HACK_OFFS))
                        pte.raddr &= ~SPLIT_HACK_MASK;
                /* fall through */
        case MSR_IR:
                vcpu->arch.mmu.esid_to_vsid(vcpu, eaddr >> SID_SHIFT, &vsid);

                if ((kvmppc_get_msr(vcpu) & (MSR_DR|MSR_IR)) == MSR_DR)
                        pte.vpage |= ((u64)VSID_REAL_DR << (SID_SHIFT - 12));
                else
                        pte.vpage |= ((u64)VSID_REAL_IR << (SID_SHIFT - 12));
                pte.vpage |= vsid;

                if (vsid == -1)
                        page_found = -EINVAL;
                break;
        }

        if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
           (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                /*
                 * If we do the dcbz hack, we have to NX on every execution,
                 * so we can patch the executing code. This renders our guest
                 * NX-less.
                 */
                pte.may_execute = !data;
        }

        if (page_found == -ENOENT || page_found == -EPERM) {
                /* Page not found in guest PTE entries, or protection fault */
                u64 flags;

                if (page_found == -EPERM)
                        flags = DSISR_PROTFAULT;
                else
                        flags = DSISR_NOHPTE;
                if (data) {
                        flags |= vcpu->arch.fault_dsisr & DSISR_ISSTORE;
                        kvmppc_core_queue_data_storage(vcpu, eaddr, flags);
                } else {
                        kvmppc_core_queue_inst_storage(vcpu, flags);
                }
        } else if (page_found == -EINVAL) {
                /* Page not found in guest SLB */
                kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                kvmppc_book3s_queue_irqprio(vcpu, vec + 0x80);
        } else if (kvmppc_visible_gpa(vcpu, pte.raddr)) {
                if (data && !(vcpu->arch.fault_dsisr & DSISR_NOHPTE)) {
                        /*
                         * There is already a host HPTE there, presumably
                         * a read-only one for a page the guest thinks
                         * is writable, so get rid of it first.
                         */
                        kvmppc_mmu_unmap_page(vcpu, &pte);
                }
                /* The guest's PTE is not mapped yet. Map on the host */
                if (kvmppc_mmu_map_page(vcpu, &pte, iswrite) == -EIO) {
                        /* Exit KVM if mapping failed */
                        run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                        return RESUME_HOST;
                }
                if (data)
                        vcpu->stat.sp_storage++;
                else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                         (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32)))
                        kvmppc_patch_dcbz(vcpu, &pte);
        } else {
                /* MMIO */
                vcpu->stat.mmio_exits++;
                vcpu->arch.paddr_accessed = pte.raddr;
                vcpu->arch.vaddr_accessed = pte.eaddr;
                r = kvmppc_emulate_mmio(run, vcpu);
                if (r == RESUME_HOST_NV)
                        r = RESUME_HOST;
        }

        return r;
}

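/*
 * FP/Altivec/VSX are switched lazily: the guest owns a unit only after
 * faulting it in through kvmppc_handle_ext() below, and giveup_ext()
 * returns it to the host, flushing the guest register state into
 * vcpu->arch.fp/vr via the thread's save-area pointers first.
 */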
/* Give up external provider (FPU, Altivec, VSX) */
void kvmppc_giveup_ext(struct kvm_vcpu *vcpu, ulong msr)
{
        struct thread_struct *t = &current->thread;

        /*
         * VSX instructions can access FP and vector registers, so if
         * we are giving up VSX, make sure we give up FP and VMX as well.
         */
        if (msr & MSR_VSX)
                msr |= MSR_FP | MSR_VEC;

        msr &= vcpu->arch.guest_owned_ext;
        if (!msr)
                return;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Giving up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                /*
                 * Note that on CPUs with VSX, giveup_fpu stores
                 * both the traditional FP registers and the added VSX
                 * registers into thread.fp_state.fpr[].
                 */
                if (t->regs->msr & MSR_FP)
                        giveup_fpu(current);
                t->fp_save_area = NULL;
        }

#ifdef CONFIG_ALTIVEC
        if (msr & MSR_VEC) {
                if (current->thread.regs->msr & MSR_VEC)
                        giveup_altivec(current);
                t->vr_save_area = NULL;
        }
#endif

        vcpu->arch.guest_owned_ext &= ~(msr | MSR_VSX);
        kvmppc_recalc_shadow_msr(vcpu);
}

/* Give up facility (TAR / EBB / DSCR) */
void kvmppc_giveup_fac(struct kvm_vcpu *vcpu, ulong fac)
{
#ifdef CONFIG_PPC_BOOK3S_64
        if (!(vcpu->arch.shadow_fscr & (1ULL << fac))) {
                /* Facility not available to the guest, ignore giveup request */
                return;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                vcpu->arch.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, current->thread.tar);
                vcpu->arch.shadow_fscr &= ~FSCR_TAR;
                break;
        }
#endif
}

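/*
 * Fault an external provider unit in for the guest: load the guest's
 * FP/VEC register images onto the CPU and mark the unit guest-owned.
 * Note that VSX implies FP and VEC, since VSX instructions can address
 * both register files.
 */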
/* Handle external providers (FPU, Altivec, VSX) */
static int kvmppc_handle_ext(struct kvm_vcpu *vcpu, unsigned int exit_nr,
                             ulong msr)
{
        struct thread_struct *t = &current->thread;

        /* When we have paired singles, we emulate in software */
        if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE)
                return RESUME_GUEST;

        if (!(kvmppc_get_msr(vcpu) & msr)) {
                kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                return RESUME_GUEST;
        }

        if (msr == MSR_VSX) {
                /* No VSX? Give an illegal instruction interrupt */
#ifdef CONFIG_VSX
                if (!cpu_has_feature(CPU_FTR_VSX))
#endif
                {
                        kvmppc_core_queue_program(vcpu, SRR1_PROGILL);
                        return RESUME_GUEST;
                }

                /*
                 * We have to load up all the FP and VMX registers before
                 * we can let the guest use VSX instructions.
                 */
                msr = MSR_FP | MSR_VEC | MSR_VSX;
        }

        /* See if we already own all the ext(s) needed */
        msr &= ~vcpu->arch.guest_owned_ext;
        if (!msr)
                return RESUME_GUEST;

#ifdef DEBUG_EXT
        printk(KERN_INFO "Loading up ext 0x%lx\n", msr);
#endif

        if (msr & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                disable_kernel_fp();
                t->fp_save_area = &vcpu->arch.fp;
                preempt_enable();
        }

        if (msr & MSR_VEC) {
#ifdef CONFIG_ALTIVEC
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                disable_kernel_altivec();
                t->vr_save_area = &vcpu->arch.vr;
                preempt_enable();
#endif
        }

        t->regs->msr |= msr;
        vcpu->arch.guest_owned_ext |= msr;
        kvmppc_recalc_shadow_msr(vcpu);

        return RESUME_GUEST;
}

/*
 * Kernel code using FP or VMX could have flushed guest state to
 * the thread_struct; if so, get it back now.
 */
static void kvmppc_handle_lost_ext(struct kvm_vcpu *vcpu)
{
        unsigned long lost_ext;

        lost_ext = vcpu->arch.guest_owned_ext & ~current->thread.regs->msr;
        if (!lost_ext)
                return;

        if (lost_ext & MSR_FP) {
                preempt_disable();
                enable_kernel_fp();
                load_fp_state(&vcpu->arch.fp);
                disable_kernel_fp();
                preempt_enable();
        }
#ifdef CONFIG_ALTIVEC
        if (lost_ext & MSR_VEC) {
                preempt_disable();
                enable_kernel_altivec();
                load_vr_state(&vcpu->arch.vr);
                disable_kernel_altivec();
                preempt_enable();
        }
#endif
        current->thread.regs->msr |= lost_ext;
}

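/*
 * ISA 2.07 facilities (TAR, EBB, DSCR, TM) are gated by the FSCR. A
 * facility-unavailable interrupt lands in kvmppc_handle_fac(): if the
 * guest has the facility enabled we make it usable (or emulate the
 * access); otherwise the interrupt is reflected into the guest.
 */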
#ifdef CONFIG_PPC_BOOK3S_64

void kvmppc_trigger_fac_interrupt(struct kvm_vcpu *vcpu, ulong fac)
{
        /* Inject the Interrupt Cause field and trigger a guest interrupt */
        vcpu->arch.fscr &= ~(0xffULL << 56);
        vcpu->arch.fscr |= (fac << 56);
        kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FAC_UNAVAIL);
}

static void kvmppc_emulate_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        enum emulation_result er = EMULATE_FAIL;

        if (!(kvmppc_get_msr(vcpu) & MSR_PR))
                er = kvmppc_emulate_instruction(vcpu->run, vcpu);

        if ((er != EMULATE_DONE) && (er != EMULATE_AGAIN)) {
                /* Couldn't emulate, trigger interrupt in guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
        }
}

/* Enable facilities (TAR, EBB, DSCR) for the guest */
static int kvmppc_handle_fac(struct kvm_vcpu *vcpu, ulong fac)
{
        bool guest_fac_enabled;
        BUG_ON(!cpu_has_feature(CPU_FTR_ARCH_207S));

        /*
         * Not every facility is enabled by FSCR bits, check whether the
         * guest has this facility enabled at all.
         */
        switch (fac) {
        case FSCR_TAR_LG:
        case FSCR_EBB_LG:
                guest_fac_enabled = (vcpu->arch.fscr & (1ULL << fac));
                break;
        case FSCR_TM_LG:
                guest_fac_enabled = kvmppc_get_msr(vcpu) & MSR_TM;
                break;
        default:
                guest_fac_enabled = false;
                break;
        }

        if (!guest_fac_enabled) {
                /* Facility not enabled by the guest */
                kvmppc_trigger_fac_interrupt(vcpu, fac);
                return RESUME_GUEST;
        }

        switch (fac) {
        case FSCR_TAR_LG:
                /* TAR switching isn't lazy in Linux yet */
                current->thread.tar = mfspr(SPRN_TAR);
                mtspr(SPRN_TAR, vcpu->arch.tar);
                vcpu->arch.shadow_fscr |= FSCR_TAR;
                break;
        default:
                kvmppc_emulate_fac(vcpu, fac);
                break;
        }

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
        /*
         * Since we disable MSR_TM in privileged state, an mfspr of a TM
         * SPR can trigger a TM facility-unavailable interrupt. That case
         * is emulated via kvmppc_emulate_fac(), which eventually invokes
         * kvmppc_emulate_mfspr(). The mfspr target register (RT) may be a
         * non-volatile register, so return RESUME_GUEST_NV to have those
         * non-volatile registers restored with the update.
         */
        if ((fac == FSCR_TM_LG) && !(kvmppc_get_msr(vcpu) & MSR_PR))
                return RESUME_GUEST_NV;
#endif

        return RESUME_GUEST;
}

void kvmppc_set_fscr(struct kvm_vcpu *vcpu, u64 fscr)
{
        if ((vcpu->arch.fscr & FSCR_TAR) && !(fscr & FSCR_TAR)) {
                /* TAR got dropped, drop it in shadow too */
                kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);
        } else if (!(vcpu->arch.fscr & FSCR_TAR) && (fscr & FSCR_TAR)) {
                vcpu->arch.fscr = fscr;
                kvmppc_handle_fac(vcpu, FSCR_TAR_LG);
                return;
        }

        vcpu->arch.fscr = fscr;
}
#endif

static void kvmppc_setup_debug(struct kvm_vcpu *vcpu)
{
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                u64 msr = kvmppc_get_msr(vcpu);

                kvmppc_set_msr(vcpu, msr | MSR_SE);
        }
}

static void kvmppc_clear_debug(struct kvm_vcpu *vcpu)
{
        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
                u64 msr = kvmppc_get_msr(vcpu);

                kvmppc_set_msr(vcpu, msr & ~MSR_SE);
        }
}

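/*
 * Program-interrupt path: either reflect the interrupt straight into
 * the guest, or emulate the offending instruction in the host and map
 * the emulator's verdict onto a resume action.
 */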
static int kvmppc_exit_pr_progint(struct kvm_run *run, struct kvm_vcpu *vcpu,
                                  unsigned int exit_nr)
{
        enum emulation_result er;
        ulong flags;
        u32 last_inst;
        int emul, r;

        /*
         * shadow_srr1 only contains valid flags if we came here via a program
         * exception. The other exceptions (emulation assist, FP unavailable,
         * etc.) do not provide flags in SRR1, so use an illegal-instruction
         * exception when injecting a program interrupt into the guest.
         */
        if (exit_nr == BOOK3S_INTERRUPT_PROGRAM)
                flags = vcpu->arch.shadow_srr1 & 0x1f0000ull;
        else
                flags = SRR1_PROGILL;

        emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
        if (emul != EMULATE_DONE)
                return RESUME_GUEST;

        if (kvmppc_get_msr(vcpu) & MSR_PR) {
#ifdef EXIT_DEBUG
                pr_info("Userspace triggered 0x700 exception at\n 0x%lx (0x%x)\n",
                        kvmppc_get_pc(vcpu), last_inst);
#endif
                if ((last_inst & 0xff0007ff) != (INS_DCBZ & 0xfffffff7)) {
                        kvmppc_core_queue_program(vcpu, flags);
                        return RESUME_GUEST;
                }
        }

        vcpu->stat.emulated_inst_exits++;
        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                r = RESUME_GUEST_NV;
                break;
        case EMULATE_AGAIN:
                r = RESUME_GUEST;
                break;
        case EMULATE_FAIL:
                pr_crit("%s: emulation at %lx failed (%08x)\n",
                        __func__, kvmppc_get_pc(vcpu), last_inst);
                kvmppc_core_queue_program(vcpu, flags);
                r = RESUME_GUEST;
                break;
        case EMULATE_DO_MMIO:
                run->exit_reason = KVM_EXIT_MMIO;
                r = RESUME_HOST_NV;
                break;
        case EMULATE_EXIT_USER:
                r = RESUME_HOST_NV;
                break;
        default:
                BUG();
        }

        return r;
}

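/*
 * Main exit dispatcher: every guest exit funnels through here with
 * interrupts already re-enabled (MSR.EE=1). The switch below turns the
 * exit reason into host-side handling or an interrupt queued for the
 * guest, and r selects between resuming the guest and going out to
 * userspace.
 */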
int kvmppc_handle_exit_pr(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;

        vcpu->stat.sum_exits++;

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        /* We get here with MSR.EE=1 */

        trace_kvm_exit(exit_nr, vcpu);
        guest_exit();

        switch (exit_nr) {
        case BOOK3S_INTERRUPT_INST_STORAGE:
        {
                ulong shadow_srr1 = vcpu->arch.shadow_srr1;
                vcpu->stat.pf_instruc++;

                if (kvmppc_is_split_real(vcpu))
                        kvmppc_fixup_split_real(vcpu);

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[kvmppc_get_pc(vcpu) >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu));
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /* only care about PTEG not found errors, but leave NX alone */
                if (shadow_srr1 & 0x40000000) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, kvmppc_get_pc(vcpu), exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                        vcpu->stat.sp_instruc++;
                } else if (vcpu->arch.mmu.is_dcbz32(vcpu) &&
                          (!(vcpu->arch.hflags & BOOK3S_HFLAG_DCBZ32))) {
                        /*
                         * XXX If we do the dcbz hack we use the NX bit to flush&patch the page,
                         *     so we can't use the NX bit inside the guest. Let's cross our fingers,
                         *     that no guest that needs the dcbz hack does NX.
                         */
                        kvmppc_mmu_pte_flush(vcpu, kvmppc_get_pc(vcpu), ~0xFFFUL);
                        r = RESUME_GUEST;
                } else {
                        kvmppc_core_queue_inst_storage(vcpu,
                                                shadow_srr1 & 0x58000000);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_STORAGE:
        {
                ulong dar = kvmppc_get_fault_dar(vcpu);
                u32 fault_dsisr = vcpu->arch.fault_dsisr;
                vcpu->stat.pf_storage++;

#ifdef CONFIG_PPC_BOOK3S_32
                /* We set segments as unused segments when invalidating them. So
                 * treat the respective fault as segment fault. */
                {
                        struct kvmppc_book3s_shadow_vcpu *svcpu;
                        u32 sr;

                        svcpu = svcpu_get(vcpu);
                        sr = svcpu->sr[dar >> SID_SHIFT];
                        svcpu_put(svcpu);
                        if (sr == SR_INVALID) {
                                kvmppc_mmu_map_segment(vcpu, dar);
                                r = RESUME_GUEST;
                                break;
                        }
                }
#endif

                /*
                 * We need to handle missing shadow PTEs, and
                 * protection faults due to us mapping a page read-only
                 * when the guest thinks it is writable.
                 */
                if (fault_dsisr & (DSISR_NOHPTE | DSISR_PROTFAULT)) {
                        int idx = srcu_read_lock(&vcpu->kvm->srcu);
                        r = kvmppc_handle_pagefault(run, vcpu, dar, exit_nr);
                        srcu_read_unlock(&vcpu->kvm->srcu, idx);
                } else {
                        kvmppc_core_queue_data_storage(vcpu, dar, fault_dsisr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_DATA_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_fault_dar(vcpu)) < 0) {
                        kvmppc_set_dar(vcpu, kvmppc_get_fault_dar(vcpu));
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_DATA_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_INST_SEGMENT:
                if (kvmppc_mmu_map_segment(vcpu, kvmppc_get_pc(vcpu)) < 0) {
                        kvmppc_book3s_queue_irqprio(vcpu,
                                BOOK3S_INTERRUPT_INST_SEGMENT);
                }
                r = RESUME_GUEST;
                break;
        /* We're good on these - the host merely wanted to get our attention */
        case BOOK3S_INTERRUPT_DECREMENTER:
        case BOOK3S_INTERRUPT_HV_DECREMENTER:
        case BOOK3S_INTERRUPT_DOORBELL:
        case BOOK3S_INTERRUPT_H_DOORBELL:
                vcpu->stat.dec_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_EXTERNAL:
        case BOOK3S_INTERRUPT_EXTERNAL_LEVEL:
        case BOOK3S_INTERRUPT_EXTERNAL_HV:
        case BOOK3S_INTERRUPT_H_VIRT:
                vcpu->stat.ext_intr_exits++;
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_HMI:
        case BOOK3S_INTERRUPT_PERFMON:
        case BOOK3S_INTERRUPT_SYSTEM_RESET:
                r = RESUME_GUEST;
                break;
        case BOOK3S_INTERRUPT_PROGRAM:
        case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
                r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
                break;
        case BOOK3S_INTERRUPT_SYSCALL:
        {
                u32 last_sc;
                int emul;

                /* Get last sc for papr */
                if (vcpu->arch.papr_enabled) {
                        /* The sc instruction points SRR0 to the next inst */
                        emul = kvmppc_get_last_inst(vcpu, INST_SC, &last_sc);
                        if (emul != EMULATE_DONE) {
                                kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) - 4);
                                r = RESUME_GUEST;
                                break;
                        }
                }

                if (vcpu->arch.papr_enabled &&
                    (last_sc == 0x44000022) &&
                    !(kvmppc_get_msr(vcpu) & MSR_PR)) {
                        /* SC 1 papr hypercalls */
                        ulong cmd = kvmppc_get_gpr(vcpu, 3);
                        int i;

#ifdef CONFIG_PPC_BOOK3S_64
                        if (kvmppc_h_pr(vcpu, cmd) == EMULATE_DONE) {
                                r = RESUME_GUEST;
                                break;
                        }
#endif

                        run->papr_hcall.nr = cmd;
                        for (i = 0; i < 9; ++i) {
                                ulong gpr = kvmppc_get_gpr(vcpu, 4 + i);
                                run->papr_hcall.args[i] = gpr;
                        }
                        run->exit_reason = KVM_EXIT_PAPR_HCALL;
                        vcpu->arch.hcall_needed = 1;
                        r = RESUME_HOST;
                } else if (vcpu->arch.osi_enabled &&
                    (((u32)kvmppc_get_gpr(vcpu, 3)) == OSI_SC_MAGIC_R3) &&
                    (((u32)kvmppc_get_gpr(vcpu, 4)) == OSI_SC_MAGIC_R4)) {
                        /* MOL hypercalls */
                        u64 *gprs = run->osi.gprs;
                        int i;

                        run->exit_reason = KVM_EXIT_OSI;
                        for (i = 0; i < 32; i++)
                                gprs[i] = kvmppc_get_gpr(vcpu, i);
                        vcpu->arch.osi_needed = 1;
                        r = RESUME_HOST_NV;
                } else if (!(kvmppc_get_msr(vcpu) & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        vcpu->stat.syscall_exits++;
                        kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
                        r = RESUME_GUEST;
                }
                break;
        }
        case BOOK3S_INTERRUPT_FP_UNAVAIL:
        case BOOK3S_INTERRUPT_ALTIVEC:
        case BOOK3S_INTERRUPT_VSX:
        {
                int ext_msr = 0;
                int emul;
                u32 last_inst;

                if (vcpu->arch.hflags & BOOK3S_HFLAG_PAIRED_SINGLE) {
                        /* Do paired single instruction emulation */
                        emul = kvmppc_get_last_inst(vcpu, INST_GENERIC,
                                                    &last_inst);
                        if (emul == EMULATE_DONE)
                                r = kvmppc_exit_pr_progint(run, vcpu, exit_nr);
                        else
                                r = RESUME_GUEST;

                        break;
                }

                /* Enable external provider */
                switch (exit_nr) {
                case BOOK3S_INTERRUPT_FP_UNAVAIL:
                        ext_msr = MSR_FP;
                        break;

                case BOOK3S_INTERRUPT_ALTIVEC:
                        ext_msr = MSR_VEC;
                        break;

                case BOOK3S_INTERRUPT_VSX:
                        ext_msr = MSR_VSX;
                        break;
                }

                r = kvmppc_handle_ext(vcpu, exit_nr, ext_msr);
                break;
        }
1362 case BOOK3S_INTERRUPT_ALIGNMENT:
Mihai Caraman9a26af62014-07-23 19:06:20 +03001363 {
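		/*
		 * Alignment interrupts are reflected to the guest kernel;
		 * we only reconstruct the DSISR and DAR values it expects
		 * from the faulting instruction before queueing the
		 * interrupt.
		 */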
		u32 last_inst;
		int emul = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);

		if (emul == EMULATE_DONE) {
			u32 dsisr;
			u64 dar;

			dsisr = kvmppc_alignment_dsisr(vcpu, last_inst);
			dar = kvmppc_alignment_dar(vcpu, last_inst);

			kvmppc_set_dsisr(vcpu, dsisr);
			kvmppc_set_dar(vcpu, dar);

			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		}
		r = RESUME_GUEST;
		break;
	}
#ifdef CONFIG_PPC_BOOK3S_64
	case BOOK3S_INTERRUPT_FAC_UNAVAIL:
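		/*
		 * The top byte of FSCR is the interrupt cause (IC) field,
		 * identifying which facility the guest tried to use.
		 */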
		r = kvmppc_handle_fac(vcpu, vcpu->arch.shadow_fscr >> 56);
		break;
#endif
	case BOOK3S_INTERRUPT_MACHINE_CHECK:
		kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_TRACE:
		if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP) {
			run->exit_reason = KVM_EXIT_DEBUG;
			r = RESUME_HOST;
		} else {
			kvmppc_book3s_queue_irqprio(vcpu, exit_nr);
			r = RESUME_GUEST;
		}
		break;
	default:
	{
		ulong shadow_srr1 = vcpu->arch.shadow_srr1;
		/* Ugh - bork here! What did we get? */
		printk(KERN_EMERG "exit_nr=0x%x | pc=0x%lx | msr=0x%lx\n",
			exit_nr, kvmppc_get_pc(vcpu), shadow_srr1);
		r = RESUME_HOST;
		BUG();
		break;
	}
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */

		/*
		 * Interrupts could be timers for the guest which we have to
		 * inject again, so let's postpone them until we're in the guest
		 * and if we really did time things so badly, then we just exit
		 * again due to a host external interrupt.
		 */
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = s;
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
		}

		kvmppc_handle_lost_ext(vcpu);
	}

	trace_kvm_book3s_reenter(r, vcpu);

	return r;
}

static int kvm_arch_vcpu_ioctl_get_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	sregs->pvr = vcpu->arch.pvr;

	sregs->u.s.sdr1 = to_book3s(vcpu)->sdr1;
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		for (i = 0; i < 64; i++) {
			sregs->u.s.ppc64.slb[i].slbe = vcpu->arch.slb[i].orige | i;
			sregs->u.s.ppc64.slb[i].slbv = vcpu->arch.slb[i].origv;
		}
	} else {
		for (i = 0; i < 16; i++)
			sregs->u.s.ppc32.sr[i] = kvmppc_get_sr(vcpu, i);

		for (i = 0; i < 8; i++) {
			sregs->u.s.ppc32.ibat[i] = vcpu3s->ibat[i].raw;
			sregs->u.s.ppc32.dbat[i] = vcpu3s->dbat[i].raw;
		}
	}

	return 0;
}

static int kvm_arch_vcpu_ioctl_set_sregs_pr(struct kvm_vcpu *vcpu,
					    struct kvm_sregs *sregs)
{
	struct kvmppc_vcpu_book3s *vcpu3s = to_book3s(vcpu);
	int i;

	kvmppc_set_pvr_pr(vcpu, sregs->pvr);

	vcpu3s->sdr1 = sregs->u.s.sdr1;
#ifdef CONFIG_PPC_BOOK3S_64
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SLB) {
		/* Flush all SLB entries */
		vcpu->arch.mmu.slbmte(vcpu, 0, 0);
		vcpu->arch.mmu.slbia(vcpu);

		for (i = 0; i < 64; i++) {
			u64 rb = sregs->u.s.ppc64.slb[i].slbe;
			u64 rs = sregs->u.s.ppc64.slb[i].slbv;

			if (rb & SLB_ESID_V)
				vcpu->arch.mmu.slbmte(vcpu, rs, rb);
		}
	} else
#endif
	{
		for (i = 0; i < 16; i++) {
			vcpu->arch.mmu.mtsrin(vcpu, i, sregs->u.s.ppc32.sr[i]);
		}
		for (i = 0; i < 8; i++) {
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), false,
				       (u32)sregs->u.s.ppc32.ibat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->ibat[i]), true,
				       (u32)(sregs->u.s.ppc32.ibat[i] >> 32));
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), false,
				       (u32)sregs->u.s.ppc32.dbat[i]);
			kvmppc_set_bat(vcpu, &(vcpu3s->dbat[i]), true,
				       (u32)(sregs->u.s.ppc32.dbat[i] >> 32));
		}
	}

	/* Flush the MMU after messing with the segments */
	kvmppc_mmu_pte_flush(vcpu, 0, 0);

	return 0;
}

static int kvmppc_get_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_HIOR:
		*val = get_reg_val(id, to_book3s(vcpu)->hior);
		break;
	case KVM_REG_PPC_VTB:
		*val = get_reg_val(id, to_book3s(vcpu)->vtb);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		/*
		 * We are only interested in the LPCR_ILE bit
		 */
		if (vcpu->arch.intr_msr & MSR_LE)
			*val = get_reg_val(id, LPCR_ILE);
		else
			*val = get_reg_val(id, 0);
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
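	/*
	 * The KVM_REG_PPC_TM_* ids expose the checkpointed (_tm) copies
	 * of the register state, i.e. the values a failing transaction
	 * rolls back to.
	 */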
	case KVM_REG_PPC_TFHAR:
		*val = get_reg_val(id, vcpu->arch.tfhar);
		break;
	case KVM_REG_PPC_TFIAR:
		*val = get_reg_val(id, vcpu->arch.tfiar);
		break;
	case KVM_REG_PPC_TEXASR:
		*val = get_reg_val(id, vcpu->arch.texasr);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		*val = get_reg_val(id,
				   vcpu->arch.gpr_tm[id-KVM_REG_PPC_TM_GPR0]);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int i, j;

		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				val->vsxval[j] = vcpu->arch.fp_tm.fpr[i][j];
		else {
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				val->vval = vcpu->arch.vr_tm.vr[i-32];
			else
				r = -ENXIO;
		}
		break;
	}
	case KVM_REG_PPC_TM_CR:
		*val = get_reg_val(id, vcpu->arch.cr_tm);
		break;
	case KVM_REG_PPC_TM_XER:
		*val = get_reg_val(id, vcpu->arch.xer_tm);
		break;
	case KVM_REG_PPC_TM_LR:
		*val = get_reg_val(id, vcpu->arch.lr_tm);
		break;
	case KVM_REG_PPC_TM_CTR:
		*val = get_reg_val(id, vcpu->arch.ctr_tm);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		*val = get_reg_val(id, vcpu->arch.fp_tm.fpscr);
		break;
	case KVM_REG_PPC_TM_AMR:
		*val = get_reg_val(id, vcpu->arch.amr_tm);
		break;
	case KVM_REG_PPC_TM_PPR:
		*val = get_reg_val(id, vcpu->arch.ppr_tm);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave_tm);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			*val = get_reg_val(id, vcpu->arch.vr_tm.vscr.u[3]);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		*val = get_reg_val(id, vcpu->arch.dscr_tm);
		break;
	case KVM_REG_PPC_TM_TAR:
		*val = get_reg_val(id, vcpu->arch.tar_tm);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

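/*
 * PR KVM models only the LPCR_ILE bit of the LPCR: it selects the
 * endianness interrupts are delivered in, which we track as MSR_LE in
 * the MSR value used when injecting interrupts into the guest.
 */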
static void kvmppc_set_lpcr_pr(struct kvm_vcpu *vcpu, u64 new_lpcr)
{
	if (new_lpcr & LPCR_ILE)
		vcpu->arch.intr_msr |= MSR_LE;
	else
		vcpu->arch.intr_msr &= ~MSR_LE;
}

static int kvmppc_set_one_reg_pr(struct kvm_vcpu *vcpu, u64 id,
				 union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_HIOR:
		to_book3s(vcpu)->hior = set_reg_val(id, *val);
		to_book3s(vcpu)->hior_explicit = true;
		break;
	case KVM_REG_PPC_VTB:
		to_book3s(vcpu)->vtb = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_LPCR:
	case KVM_REG_PPC_LPCR_64:
		kvmppc_set_lpcr_pr(vcpu, set_reg_val(id, *val));
		break;
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
	case KVM_REG_PPC_TFHAR:
		vcpu->arch.tfhar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TFIAR:
		vcpu->arch.tfiar = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TEXASR:
		vcpu->arch.texasr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_GPR0 ... KVM_REG_PPC_TM_GPR31:
		vcpu->arch.gpr_tm[id - KVM_REG_PPC_TM_GPR0] =
			set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSR0 ... KVM_REG_PPC_TM_VSR63:
	{
		int i, j;

		i = id - KVM_REG_PPC_TM_VSR0;
		if (i < 32)
			for (j = 0; j < TS_FPRWIDTH; j++)
				vcpu->arch.fp_tm.fpr[i][j] = val->vsxval[j];
		else
			if (cpu_has_feature(CPU_FTR_ALTIVEC))
				vcpu->arch.vr_tm.vr[i-32] = val->vval;
			else
				r = -ENXIO;
		break;
	}
	case KVM_REG_PPC_TM_CR:
		vcpu->arch.cr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_XER:
		vcpu->arch.xer_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_LR:
		vcpu->arch.lr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_CTR:
		vcpu->arch.ctr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_FPSCR:
		vcpu->arch.fp_tm.fpscr = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_AMR:
		vcpu->arch.amr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_PPR:
		vcpu->arch.ppr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VRSAVE:
		vcpu->arch.vrsave_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_VSCR:
		if (cpu_has_feature(CPU_FTR_ALTIVEC))
			/* Write the checkpointed VSCR, matching the getter */
			vcpu->arch.vr_tm.vscr.u[3] = set_reg_val(id, *val);
		else
			r = -ENXIO;
		break;
	case KVM_REG_PPC_TM_DSCR:
		vcpu->arch.dscr_tm = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_TM_TAR:
		vcpu->arch.tar_tm = set_reg_val(id, *val);
		break;
#endif
	default:
		r = -EINVAL;
		break;
	}

	return r;
}

static struct kvm_vcpu *kvmppc_core_vcpu_create_pr(struct kvm *kvm,
						   unsigned int id)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s;
	struct kvm_vcpu *vcpu;
	int err = -ENOMEM;
	unsigned long p;

	vcpu = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!vcpu)
		goto out;

	vcpu_book3s = vzalloc(sizeof(struct kvmppc_vcpu_book3s));
	if (!vcpu_book3s)
		goto free_vcpu;
	vcpu->arch.book3s = vcpu_book3s;

#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	vcpu->arch.shadow_vcpu =
		kzalloc(sizeof(*vcpu->arch.shadow_vcpu), GFP_KERNEL);
	if (!vcpu->arch.shadow_vcpu)
		goto free_vcpu3s;
#endif

	err = kvm_vcpu_init(vcpu, kvm, id);
	if (err)
		goto free_shadow_vcpu;

	err = -ENOMEM;
	p = __get_free_page(GFP_KERNEL|__GFP_ZERO);
	if (!p)
		goto uninit_vcpu;
	vcpu->arch.shared = (void *)p;
#ifdef CONFIG_PPC_BOOK3S_64
	/* Always start the shared struct in native endian mode */
#ifdef __BIG_ENDIAN__
	vcpu->arch.shared_big_endian = true;
#else
	vcpu->arch.shared_big_endian = false;
#endif

	/*
	 * Default to the same as the host if we're on sufficiently
	 * recent machine that we have 1TB segments;
	 * otherwise default to PPC970FX.
	 */
	vcpu->arch.pvr = 0x3C0301;
	if (mmu_has_feature(MMU_FTR_1T_SEGMENT))
		vcpu->arch.pvr = mfspr(SPRN_PVR);
	vcpu->arch.intr_msr = MSR_SF;
#else
	/* default to book3s_32 (750) */
	vcpu->arch.pvr = 0x84202;
#endif
	kvmppc_set_pvr_pr(vcpu, vcpu->arch.pvr);
	vcpu->arch.slb_nr = 64;

	vcpu->arch.shadow_msr = MSR_USER64 & ~MSR_LE;

	err = kvmppc_mmu_init(vcpu);
	if (err < 0)
		goto uninit_vcpu;

	return vcpu;

uninit_vcpu:
	kvm_vcpu_uninit(vcpu);
free_shadow_vcpu:
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
free_vcpu3s:
#endif
	vfree(vcpu_book3s);
free_vcpu:
	kmem_cache_free(kvm_vcpu_cache, vcpu);
out:
	return ERR_PTR(err);
}

static void kvmppc_core_vcpu_free_pr(struct kvm_vcpu *vcpu)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);

	free_page((unsigned long)vcpu->arch.shared & PAGE_MASK);
	kvm_vcpu_uninit(vcpu);
#ifdef CONFIG_KVM_BOOK3S_32_HANDLER
	kfree(vcpu->arch.shadow_vcpu);
#endif
	vfree(vcpu_book3s);
	kmem_cache_free(kvm_vcpu_cache, vcpu);
}

static int kvmppc_vcpu_run_pr(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret;
#ifdef CONFIG_ALTIVEC
	unsigned long uninitialized_var(vrsave);
#endif

	/* Check if we can run the vcpu at all */
	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		ret = -EINVAL;
		goto out;
	}

	kvmppc_setup_debug(vcpu);

	/*
	 * Interrupts could be timers for the guest which we have to inject
	 * again, so let's postpone them until we're in the guest and if we
	 * really did time things so badly, then we just exit again due to
	 * a host external interrupt.
	 */
	ret = kvmppc_prepare_to_enter(vcpu);
	if (ret <= 0)
		goto out;
	/* interrupts now hard-disabled */

	/* Save FPU, Altivec and VSX state */
	giveup_all(current);

	/* Preload FPU if it's enabled */
	if (kvmppc_get_msr(vcpu) & MSR_FP)
		kvmppc_handle_ext(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL, MSR_FP);

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	kvmppc_clear_debug(vcpu);

	/*
	 * No need for guest_exit. It's done in handle_exit.
	 * We also get here with interrupts enabled.
	 */

	/* Make sure we save the guest FPU/Altivec/VSX state */
	kvmppc_giveup_ext(vcpu, MSR_FP | MSR_VEC | MSR_VSX);

	/* Make sure we save the guest TAR/EBB/DSCR state */
	kvmppc_giveup_fac(vcpu, FSCR_TAR_LG);

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

/*
 * Get (and clear) the dirty memory log for a memory slot.
 */
static int kvm_vm_ioctl_get_dirty_log_pr(struct kvm *kvm,
					 struct kvm_dirty_log *log)
{
	struct kvm_memslots *slots;
	struct kvm_memory_slot *memslot;
	struct kvm_vcpu *vcpu;
	ulong ga, ga_end;
	int is_dirty = 0;
	int r;
	unsigned long n;

	mutex_lock(&kvm->slots_lock);

	r = kvm_get_dirty_log(kvm, log, &is_dirty);
	if (r)
		goto out;

	/* If nothing is dirty, don't bother messing with page tables. */
	if (is_dirty) {
		slots = kvm_memslots(kvm);
		memslot = id_to_memslot(slots, log->slot);

		ga = memslot->base_gfn << PAGE_SHIFT;
		ga_end = ga + (memslot->npages << PAGE_SHIFT);

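		/*
		 * Drop the shadow PTEs covering this slot so the guest's
		 * next writes fault again and show up as newly dirtied
		 * pages.
		 */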
		kvm_for_each_vcpu(n, vcpu, kvm)
			kvmppc_mmu_pte_pflush(vcpu, ga, ga_end);

		n = kvm_dirty_bitmap_bytes(memslot);
		memset(memslot->dirty_bitmap, 0, n);
	}

	r = 0;
out:
	mutex_unlock(&kvm->slots_lock);
	return r;
}

static void kvmppc_core_flush_memslot_pr(struct kvm *kvm,
					 struct kvm_memory_slot *memslot)
{
	return;
}

static int kvmppc_core_prepare_memory_region_pr(struct kvm *kvm,
				struct kvm_memory_slot *memslot,
				const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

static void kvmppc_core_commit_memory_region_pr(struct kvm *kvm,
				const struct kvm_userspace_memory_region *mem,
				const struct kvm_memory_slot *old,
				const struct kvm_memory_slot *new)
{
	return;
}

static void kvmppc_core_free_memslot_pr(struct kvm_memory_slot *free,
					struct kvm_memory_slot *dont)
{
	return;
}

static int kvmppc_core_create_memslot_pr(struct kvm_memory_slot *slot,
					 unsigned long npages)
{
	return 0;
}

#ifdef CONFIG_PPC64
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	long int i;
	struct kvm_vcpu *vcpu;

	info->flags = 0;

	/* SLB is always 64 entries */
	info->slb_size = 64;

	/* Standard 4k base page size segment */
	info->sps[0].page_shift = 12;
	info->sps[0].slb_enc = 0;
	info->sps[0].enc[0].page_shift = 12;
	info->sps[0].enc[0].pte_enc = 0;

	/*
	 * 64k large page size.
	 * We only want to put this in if the CPUs we're emulating
	 * support it, but unfortunately we don't have a vcpu easily
	 * to hand here to test. Just pick the first vcpu, and if
	 * that doesn't exist yet, report the minimum capability,
	 * i.e., no 64k pages.
	 * 1T segment support goes along with 64k pages.
	 */
	i = 1;
	vcpu = kvm_get_vcpu(kvm, 0);
	if (vcpu && (vcpu->arch.hflags & BOOK3S_HFLAG_MULTI_PGSIZE)) {
		info->flags = KVM_PPC_1T_SEGMENTS;
		info->sps[i].page_shift = 16;
		info->sps[i].slb_enc = SLB_VSID_L | SLB_VSID_LP_01;
		info->sps[i].enc[0].page_shift = 16;
		info->sps[i].enc[0].pte_enc = 1;
		++i;
	}

	/* Standard 16M large page size segment */
	info->sps[i].page_shift = 24;
	info->sps[i].slb_enc = SLB_VSID_L;
	info->sps[i].enc[0].page_shift = 24;
	info->sps[i].enc[0].pte_enc = 0;

	return 0;
}

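/*
 * Userspace on POWER9 hosts may call the KVM_PPC_CONFIGURE_V3_MMU
 * ioctl; PR KVM only supports HPT mode, so accept nothing but an
 * all-zero (HPT, no process table) configuration.
 */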
static int kvm_configure_mmu_pr(struct kvm *kvm, struct kvm_ppc_mmuv3_cfg *cfg)
{
	if (!cpu_has_feature(CPU_FTR_ARCH_300))
		return -ENODEV;
	/* Require flags and process table base and size to all be zero. */
	if (cfg->flags || cfg->process_table)
		return -EINVAL;
	return 0;
}

#else
static int kvm_vm_ioctl_get_smmu_info_pr(struct kvm *kvm,
					 struct kvm_ppc_smmu_info *info)
{
	/* We should not get called */
	BUG();
}
#endif /* CONFIG_PPC64 */

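/*
 * PR KVM intercepts guest exceptions in the kernel's real-mode
 * interrupt vectors, so while any PR VM exists on a pseries host we
 * turn off "relocation on exception" system-wide; the refcount below
 * tracks how many VMs currently require this.
 */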
static unsigned int kvm_global_user_count = 0;
static DEFINE_SPINLOCK(kvm_global_user_count_lock);

static int kvmppc_core_init_vm_pr(struct kvm *kvm)
{
	mutex_init(&kvm->arch.hpt_mutex);

#ifdef CONFIG_PPC_BOOK3S_64
	/* Start out with the default set of hcalls enabled */
	kvmppc_pr_init_default_hcalls(kvm);
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		if (++kvm_global_user_count == 1)
			pseries_disable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
	return 0;
}

static void kvmppc_core_destroy_vm_pr(struct kvm *kvm)
{
#ifdef CONFIG_PPC64
	WARN_ON(!list_empty(&kvm->arch.spapr_tce_tables));
#endif

	if (firmware_has_feature(FW_FEATURE_SET_MODE)) {
		spin_lock(&kvm_global_user_count_lock);
		BUG_ON(kvm_global_user_count == 0);
		if (--kvm_global_user_count == 0)
			pseries_enable_reloc_on_exc();
		spin_unlock(&kvm_global_user_count_lock);
	}
}

static int kvmppc_core_check_processor_compat_pr(void)
{
	/*
	 * PR KVM can work on POWER9 inside a guest partition
	 * running in HPT mode. It can't work if we are using
	 * radix translation (because radix provides no way for
	 * a process to have unique translations in quadrant 3)
	 * or in a bare-metal HPT-mode host (because POWER9
	 * uses a modified HPTE format which the PR KVM code
	 * has not been adapted to use).
	 */
	if (cpu_has_feature(CPU_FTR_ARCH_300) &&
	    (radix_enabled() || cpu_has_feature(CPU_FTR_HVMODE)))
		return -EIO;
	return 0;
}

static long kvm_arch_vm_ioctl_pr(struct file *filp,
				 unsigned int ioctl, unsigned long arg)
{
	return -ENOTTY;
}

static struct kvmppc_ops kvm_ops_pr = {
	.get_sregs = kvm_arch_vcpu_ioctl_get_sregs_pr,
	.set_sregs = kvm_arch_vcpu_ioctl_set_sregs_pr,
	.get_one_reg = kvmppc_get_one_reg_pr,
	.set_one_reg = kvmppc_set_one_reg_pr,
	.vcpu_load   = kvmppc_core_vcpu_load_pr,
	.vcpu_put    = kvmppc_core_vcpu_put_pr,
	.set_msr     = kvmppc_set_msr_pr,
	.vcpu_run    = kvmppc_vcpu_run_pr,
	.vcpu_create = kvmppc_core_vcpu_create_pr,
	.vcpu_free   = kvmppc_core_vcpu_free_pr,
	.check_requests = kvmppc_core_check_requests_pr,
	.get_dirty_log = kvm_vm_ioctl_get_dirty_log_pr,
	.flush_memslot = kvmppc_core_flush_memslot_pr,
	.prepare_memory_region = kvmppc_core_prepare_memory_region_pr,
	.commit_memory_region = kvmppc_core_commit_memory_region_pr,
	.unmap_hva_range = kvm_unmap_hva_range_pr,
	.age_hva  = kvm_age_hva_pr,
	.test_age_hva = kvm_test_age_hva_pr,
	.set_spte_hva = kvm_set_spte_hva_pr,
	.mmu_destroy  = kvmppc_mmu_destroy_pr,
	.free_memslot = kvmppc_core_free_memslot_pr,
	.create_memslot = kvmppc_core_create_memslot_pr,
	.init_vm = kvmppc_core_init_vm_pr,
	.destroy_vm = kvmppc_core_destroy_vm_pr,
	.get_smmu_info = kvm_vm_ioctl_get_smmu_info_pr,
	.emulate_op = kvmppc_core_emulate_op_pr,
	.emulate_mtspr = kvmppc_core_emulate_mtspr_pr,
	.emulate_mfspr = kvmppc_core_emulate_mfspr_pr,
	.fast_vcpu_kick = kvm_vcpu_kick,
	.arch_vm_ioctl = kvm_arch_vm_ioctl_pr,
#ifdef CONFIG_PPC_BOOK3S_64
	.hcall_implemented = kvmppc_hcall_impl_pr,
	.configure_mmu = kvm_configure_mmu_pr,
#endif
	.giveup_ext = kvmppc_giveup_ext,
};

int kvmppc_book3s_init_pr(void)
{
	int r;

	r = kvmppc_core_check_processor_compat_pr();
	if (r < 0)
		return r;

	kvm_ops_pr.owner = THIS_MODULE;
	kvmppc_pr_ops = &kvm_ops_pr;

	r = kvmppc_mmu_hpte_sysinit();
	return r;
}

void kvmppc_book3s_exit_pr(void)
{
	kvmppc_pr_ops = NULL;
	kvmppc_mmu_hpte_sysexit();
}

/*
 * We only support separate modules for book3s 64
 */
#ifdef CONFIG_PPC_BOOK3S_64

module_init(kvmppc_book3s_init_pr);
module_exit(kvmppc_book3s_exit_pr);

MODULE_LICENSE("GPL");
MODULE_ALIAS_MISCDEV(KVM_MINOR);
MODULE_ALIAS("devname:kvm");
#endif