/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <linux/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_attempted_poll", VCPU_STAT(halt_attempted_poll) },
	{ "halt_poll_invalid", VCPU_STAT(halt_poll_invalid) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					    vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

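/*
 * Lazy SPE state switching: the guest's view of the MSR lives in
 * shared->msr, while shadow_msr holds what the hardware actually runs
 * with.  Guest SPE state is only loaded onto the CPU once the guest
 * sets MSR[SPE], and is handed back to the host when the guest clears
 * it again.
 */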
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	disable_kernel_spe();
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load the guest vcpu FP state if it's needed.
 * This also sets MSR_FP in the thread so that the host knows
 * we're holding the FPU, letting the host save the guest vcpu
 * FP state if another thread needs the FPU.
 * This simulates an FP unavailable fault.
 *
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		disable_kernel_fp();
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save the guest vcpu FP state into the thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load the guest state
 * from the thread into the AltiVec unit.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			disable_kernel_altivec();
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save the guest vcpu AltiVec state into the thread.
 * Must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
				 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
				    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_fpunavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
}

#ifdef CONFIG_ALTIVEC
void kvmppc_core_queue_vec_unavail(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
}
#endif

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

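/*
 * Booke has one pair of save/restore registers per interrupt class:
 * SRR0/1 for non-critical, CSRR0/1 for critical, DSRR0/1 for debug
 * (when the core implements a separate debug level) and MCSRR0/1 for
 * machine checks.  The helpers above pick the pair that matches the
 * class of the interrupt being delivered.
 */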
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

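	/*
	 * Per the KVM paravirt (magic page) protocol, the guest marks a
	 * critical section by copying r1 into shared->critical; while the
	 * two match (and the guest is not in user mode), delivery of
	 * interrupt classes that honor this flag must be suppressed.
	 */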
	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout.  If the timeout
 * is longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because a larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
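	/*
	 * A sketch of the arithmetic above: if the selected TB bit is
	 * currently 0, the next 0->1 toggle is wdt_tb - (tb & (wdt_tb - 1))
	 * ticks away; if it is currently 1, it first has to fall back to 0,
	 * which costs one extra wdt_tb period -- hence the conditional
	 * wdt_ticks seed.
	 */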

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		kvm_clear_request(KVM_REQ_WATCHDOG, vcpu);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(struct timer_list *t)
{
	struct kvm_vcpu *vcpu = from_timer(vcpu, t, arch.wdt_timer);
	u32 tsr, new_tsr;
	int final;

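	/*
	 * Booke watchdog state machine: the first expiry sets TSR[ENW],
	 * the second sets TSR[WIS], and an expiry with both bits already
	 * set is the final one, for which TCR[WRC] may demand an action.
	 * The cmpxchg loop below applies one such step atomically.
	 */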
	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

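/*
 * Pending exceptions are a bitmap of BOOKE_IRQPRIO_* values, where a
 * lower bit number means a higher priority; __ffs() therefore yields
 * the most urgent candidate first, and delivery stops at the first
 * priority the current guest state allows.
 */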
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (kvm_request_pending(vcpu)) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		kvm_clear_request(KVM_REQ_UNHALT, vcpu);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state in stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;
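	/*
	 * At this point switch_booke_debug_regs() has loaded the guest's
	 * debug registers into the hardware; 'debug' keeps the host copy
	 * so it can be restored after __kvmppc_vcpu_run(), while
	 * current->thread.debug tracks the guest values in the meantime.
	 */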

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	if (vcpu->guest_debug == 0) {
		/*
		 * Debug resources belong to Guest.
		 * Imprecise debug event is not injected
		 */
		if (dbsr & DBSR_IDE) {
			dbsr &= ~DBSR_IDE;
			if (!dbsr)
				return RESUME_GUEST;
		}

		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
		    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
			kvmppc_core_queue_debug(vcpu);

		/* Inject a program interrupt if trap debug is not allowed */
		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
			kvmppc_core_queue_program(vcpu, ESR_PTR);

		return RESUME_GUEST;
	}

	/*
	 * Debug resource owned by userspace.
	 * Clear guest dbsr (vcpu->arch.dbsr)
	 */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.pc;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

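/*
 * Host interrupt handlers expect a struct pt_regs, but the real
 * exception frame belongs to the guest.  Synthesize a minimal frame
 * from the current host context (stack pointer, a nearby NIP, MSR and
 * LR) so that do_IRQ(), timer_interrupt() etc. can be re-driven from
 * kvmppc_restart_interrupt() below.
 */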
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (though not exactly) as it would be called from the low level handler
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
			 __func__, vcpu->arch.pc);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * Get the last instruction before being preempted.
	 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	case BOOKE_INTERRUPT_PROGRAM:
		/* SW breakpoints arrive as illegal instructions on HV */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	default:
		break;
	}

	trace_kvm_exit(exit_nr, vcpu);
	guest_exit_irqoff();

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
		goto out;
	}

Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001039 switch (exit_nr) {
1040 case BOOKE_INTERRUPT_MACHINE_CHECK:
Alexander Grafc35c9d82012-02-20 12:21:18 +01001041 printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
1042 kvmppc_dump_vcpu(vcpu);
1043 /* For debugging, send invalid exit reason to user space */
1044 run->hw.hardware_exit_reason = ~1ULL << 32;
1045 run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
1046 r = RESUME_HOST;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001047 break;
1048
1049 case BOOKE_INTERRUPT_EXTERNAL:
Hollis Blanchard7b701592008-12-02 15:51:58 -06001050 kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
Hollis Blanchard1b6766c2008-11-05 09:36:21 -06001051 r = RESUME_GUEST;
1052 break;
1053
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001054 case BOOKE_INTERRUPT_DECREMENTER:
Hollis Blanchard7b701592008-12-02 15:51:58 -06001055 kvmppc_account_exit(vcpu, DEC_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001056 r = RESUME_GUEST;
1057 break;
1058
Bharat Bhushan6328e592012-06-20 05:56:53 +00001059 case BOOKE_INTERRUPT_WATCHDOG:
1060 r = RESUME_GUEST;
1061 break;
1062
Scott Woodd30f6e42011-12-20 15:34:43 +00001063 case BOOKE_INTERRUPT_DOORBELL:
1064 kvmppc_account_exit(vcpu, DBELL_EXITS);
Scott Woodd30f6e42011-12-20 15:34:43 +00001065 r = RESUME_GUEST;
1066 break;
1067
1068 case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
1069 kvmppc_account_exit(vcpu, GDBELL_EXITS);
1070
1071 /*
1072 * We are here because there is a pending guest interrupt
1073 * which could not be delivered as MSR_CE or MSR_ME was not
1074 * set. Once we break from here we will retry delivery.
1075 */
1076 r = RESUME_GUEST;
1077 break;
1078
1079 case BOOKE_INTERRUPT_GUEST_DBELL:
1080 kvmppc_account_exit(vcpu, GDBELL_EXITS);
1081
1082 /*
1083 * We are here because there is a pending guest interrupt
1084 * which could not be delivered as MSR_EE was not set. Once
1085 * we break from here we will retry delivery.
1086 */
1087 r = RESUME_GUEST;
1088 break;
1089
Alexander Graf95f2e922012-02-20 22:45:12 +01001090 case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
1091 r = RESUME_GUEST;
1092 break;
1093
Scott Woodd30f6e42011-12-20 15:34:43 +00001094 case BOOKE_INTERRUPT_HV_PRIV:
1095 r = emulation_exit(run, vcpu);
1096 break;
1097
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001098 case BOOKE_INTERRUPT_PROGRAM:
Madhavan Srinivasan033aaa12014-09-09 22:37:36 +05301099 if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
1100 (last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
1101 /*
1102 * We are here because of an SW breakpoint instr,
1103 * so lets return to host to handle.
1104 */
1105 r = kvmppc_handle_debug(run, vcpu);
1106 run->exit_reason = KVM_EXIT_DEBUG;
1107 kvmppc_account_exit(vcpu, DEBUG_EXITS);
1108 break;
1109 }
1110
Scott Woodd30f6e42011-12-20 15:34:43 +00001111 if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
Alexander Graf02685972012-02-20 12:33:22 +01001112 /*
1113 * Program traps generated by user-level software must
1114 * be handled by the guest kernel.
1115 *
1116 * In GS mode, hypervisor privileged instructions trap
1117 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
1118 * actual program interrupts, handled by the guest.
1119 */
Liu Yudaf5e272010-02-02 19:44:35 +08001120 kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001121 r = RESUME_GUEST;
Hollis Blanchard7b701592008-12-02 15:51:58 -06001122 kvmppc_account_exit(vcpu, USR_PR_INST);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001123 break;
1124 }
1125
Scott Woodd30f6e42011-12-20 15:34:43 +00001126 r = emulation_exit(run, vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001127 break;
1128
Christian Ehrhardtde368dc2008-04-29 18:18:23 +02001129 case BOOKE_INTERRUPT_FP_UNAVAIL:
Hollis Blanchardd4cf3892008-11-05 09:36:23 -06001130 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001131 kvmppc_account_exit(vcpu, FP_UNAVAIL);
Christian Ehrhardtde368dc2008-04-29 18:18:23 +02001132 r = RESUME_GUEST;
1133 break;
1134
Scott Wood4cd35f62011-06-14 18:34:31 -05001135#ifdef CONFIG_SPE
1136 case BOOKE_INTERRUPT_SPE_UNAVAIL: {
1137 if (vcpu->arch.shared->msr & MSR_SPE)
1138 kvmppc_vcpu_enable_spe(vcpu);
1139 else
1140 kvmppc_booke_queue_irqprio(vcpu,
1141 BOOKE_IRQPRIO_SPE_UNAVAIL);
Hollis Blanchardbb3a8a12009-01-03 16:23:13 -06001142 r = RESUME_GUEST;
1143 break;
Scott Wood4cd35f62011-06-14 18:34:31 -05001144 }
Hollis Blanchardbb3a8a12009-01-03 16:23:13 -06001145
1146 case BOOKE_INTERRUPT_SPE_FP_DATA:
1147 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
1148 r = RESUME_GUEST;
1149 break;
1150
1151 case BOOKE_INTERRUPT_SPE_FP_ROUND:
1152 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
1153 r = RESUME_GUEST;
1154 break;
Mihai Caraman95d80a22014-08-20 16:36:23 +03001155#elif defined(CONFIG_SPE_POSSIBLE)
Scott Wood4cd35f62011-06-14 18:34:31 -05001156 case BOOKE_INTERRUPT_SPE_UNAVAIL:
1157 /*
1158 * Guest wants SPE, but host kernel doesn't support it. Send
1159 * an "unimplemented operation" program check to the guest.
1160 */
1161 kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
1162 r = RESUME_GUEST;
1163 break;
1164
1165 /*
1166 * These really should never happen without CONFIG_SPE,
1167 * as we should never enable the real MSR[SPE] in the guest.
1168 */
1169 case BOOKE_INTERRUPT_SPE_FP_DATA:
1170 case BOOKE_INTERRUPT_SPE_FP_ROUND:
1171 printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
1172 __func__, exit_nr, vcpu->arch.pc);
1173 run->hw.hardware_exit_reason = exit_nr;
1174 r = RESUME_HOST;
1175 break;
Mihai Caraman95d80a22014-08-20 16:36:23 +03001176#endif /* CONFIG_SPE_POSSIBLE */
1177
1178/*
1179 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
1180 * see kvmppc_core_check_processor_compat().
1181 */
1182#ifdef CONFIG_ALTIVEC
1183 case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
1184 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
1185 r = RESUME_GUEST;
1186 break;
1187
1188 case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
1189 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
1190 r = RESUME_GUEST;
1191 break;
Scott Wood4cd35f62011-06-14 18:34:31 -05001192#endif
Hollis Blanchardbb3a8a12009-01-03 16:23:13 -06001193
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001194 case BOOKE_INTERRUPT_DATA_STORAGE:
Liu Yudaf5e272010-02-02 19:44:35 +08001195 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
1196 vcpu->arch.fault_esr);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001197 kvmppc_account_exit(vcpu, DSI_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001198 r = RESUME_GUEST;
1199 break;
1200
1201 case BOOKE_INTERRUPT_INST_STORAGE:
Liu Yudaf5e272010-02-02 19:44:35 +08001202 kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001203 kvmppc_account_exit(vcpu, ISI_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001204 r = RESUME_GUEST;
1205 break;
1206
Alexander Graf011da892013-01-31 14:17:38 +01001207 case BOOKE_INTERRUPT_ALIGNMENT:
1208 kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
1209 vcpu->arch.fault_esr);
1210 r = RESUME_GUEST;
1211 break;
1212
Scott Woodd30f6e42011-12-20 15:34:43 +00001213#ifdef CONFIG_KVM_BOOKE_HV
1214 case BOOKE_INTERRUPT_HV_SYSCALL:
1215 if (!(vcpu->arch.shared->msr & MSR_PR)) {
1216 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1217 } else {
1218 /*
1219 * hcall from guest userspace -- send privileged
1220 * instruction program check.
1221 */
1222 kvmppc_core_queue_program(vcpu, ESR_PPR);
1223 }
1224
1225 r = RESUME_GUEST;
1226 break;
1227#else
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001228 case BOOKE_INTERRUPT_SYSCALL:
Alexander Graf2a342ed2010-07-29 14:47:48 +02001229 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1230 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1231 /* KVM PV hypercalls */
1232 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1233 r = RESUME_GUEST;
1234 } else {
1235 /* Guest syscalls */
1236 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
1237 }
Hollis Blanchard7b701592008-12-02 15:51:58 -06001238 kvmppc_account_exit(vcpu, SYSCALL_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001239 r = RESUME_GUEST;
1240 break;
Scott Woodd30f6e42011-12-20 15:34:43 +00001241#endif
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001242
1243 case BOOKE_INTERRUPT_DTLB_MISS: {
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001244 unsigned long eaddr = vcpu->arch.fault_dear;
Hollis Blanchard7924bd42008-12-02 15:51:55 -06001245 int gtlb_index;
Hollis Blanchard475e7cd2009-01-03 16:23:00 -06001246 gpa_t gpaddr;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001247 gfn_t gfn;
1248
Alexander Grafbf7ca4b2012-02-15 23:40:00 +00001249#ifdef CONFIG_KVM_E500V2
Scott Wooda4cd8b22011-06-14 18:34:41 -05001250 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1251 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1252 kvmppc_map_magic(vcpu);
1253 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
1254 r = RESUME_GUEST;
1255
1256 break;
1257 }
1258#endif
1259
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001260 /* Check the guest TLB. */
Hollis Blanchardfa86b8d2009-01-03 16:23:03 -06001261 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
Hollis Blanchard7924bd42008-12-02 15:51:55 -06001262 if (gtlb_index < 0) {
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001263 /* The guest didn't have a mapping for it. */
Liu Yudaf5e272010-02-02 19:44:35 +08001264 kvmppc_core_queue_dtlb_miss(vcpu,
1265 vcpu->arch.fault_dear,
1266 vcpu->arch.fault_esr);
Hollis Blanchardb52a6382009-01-03 16:23:11 -06001267 kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

out:
	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
			kvmppc_load_guest_fp(vcpu);
			kvmppc_load_guest_altivec(vcpu);
		}
	}

	return r;
}

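/*
 * Wholesale replacement of the guest's TSR. If the watchdog bits
 * (TSR[ENW]/TSR[WIS]) change, the next watchdog expiry must be
 * recomputed, and pending timer interrupts re-evaluated either way.
 */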
static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* Set up the watchdog timer once per vcpu. */
	spin_lock_init(&vcpu->arch.wdt_lock);
	timer_setup(&vcpu->arch.wdt_timer, kvmppc_watchdog_func, 0);

	/*
	 * Clear DBSR.MRR to avoid a spurious guest debug interrupt;
	 * a Most Recent Reset event is of host interest only.
	 */
	mtspr(SPRN_DBSR, DBSR_MRR);
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

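/*
 * Full register-file save/restore for the KVM_GET_REGS/KVM_SET_REGS vcpu
 * ioctls. Illustrative userspace caller (a sketch; "vcpu_fd" and
 * "entry_point" are assumed by this example, not defined in this file):
 *
 *	struct kvm_regs regs;
 *	if (ioctl(vcpu_fd, KVM_GET_REGS, &regs) == 0) {
 *		regs.pc = entry_point;	/- hypothetical guest entry point -/
 *		ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 *	}
 */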
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	vcpu_put(vcpu);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_pid(vcpu, regs->pid);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	vcpu_put(vcpu);
	return 0;
}

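/*
 * KVM_GET_SREGS builds sregs->u.e.features up as a set of feature flags
 * (KVM_SREGS_E_BASE, KVM_SREGS_E_ARCH206, ...); each helper below fills
 * in the fields it owns. On the set side, a helper is a no-op unless
 * userspace passed in the matching feature flag.
 */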
static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = kvmppc_get_esr(vcpu);
	sregs->u.e.dear = kvmppc_get_dar(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	kvmppc_set_esr(vcpu, sregs->u.e.esr);
	kvmppc_set_dar(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

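/*
 * The sixteen base-category interrupt vector offsets travel as
 * sregs->u.e.ivor_low[0..15], in architected IVOR0..IVOR15 order
 * (critical input first, debug last).
 */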
int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
	return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

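/*
 * Top-level sregs ioctls: combine the generic BookE state above with
 * whatever the core backend (e500 etc.) exposes through kvm_ops.
 */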
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	vcpu_load(vcpu);

	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	ret = vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);

	vcpu_put(vcpu);
	return ret;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret = -EINVAL;

	vcpu_load(vcpu);
	if (vcpu->arch.pvr != sregs->pvr)
		goto out;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		goto out;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		goto out;

	ret = vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);

out:
	vcpu_put(vcpu);
	return ret;
}

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		u32 epr = kvmppc_get_epr(vcpu);
		*val = get_reg_val(id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		*val = get_reg_val(id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		*val = get_reg_val(id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		*val = get_reg_val(id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(id, *val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(id, *val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(id, *val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(id, *val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(id, *val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	vcpu_load(vcpu);
	r = kvmppc_core_vcpu_translate(vcpu, tr);
	vcpu_put(vcpu);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      const struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

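/*
 * EPCR[ICM] selects the guest's computation mode (32- vs. 64-bit) on
 * interrupt entry. On HV hosts the bit that actually applies to the
 * guest is EPCR[GICM] in the shadow EPCR, so mirror ICM into GICM here.
 */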
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

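/*
 * Decrementer expiry. With auto-reload (TCR[ARE]) the decrementer is
 * reloaded from DECAR and keeps ticking; either way TSR[DIS] is set so
 * the guest sees a decrementer interrupt.
 */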
void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

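/*
 * Program one of the instruction address compare (IAC) registers for a
 * hardware breakpoint. "index" is the next free IAC slot; DBCR0[IDM] is
 * set so debug events are raised in internal debug mode.
 */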
static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}
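
/*
 * Make selected guest MSR bits privileged. With "set", guest attempts to
 * flip those bits trap to the hypervisor; otherwise the guest may change
 * them freely. Only implemented for BookE-HV, via the MSRP register.
 */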
void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}

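/*
 * Translate a guest effective address to a guest physical address using
 * the guest's own TLB (or the magic page shortcut on e500v2), filling in
 * a kvmppc_pte. Returns -ENOENT if the guest has no mapping.
 */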
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	int gtlb_index;
	gpa_t gpaddr;

#ifdef CONFIG_KVM_E500V2
	if (!(vcpu->arch.shared->msr & MSR_PR) &&
	    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
		pte->eaddr = eaddr;
		pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
			     (eaddr & ~PAGE_MASK);
		pte->vpage = eaddr >> PAGE_SHIFT;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;

		return 0;
	}
#endif

	/* Check the guest TLB. */
	switch (xlid) {
	case XLATE_INST:
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		break;
	case XLATE_DATA:
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		break;
	default:
		BUG();
	}

	/* Do we have a TLB entry at all? */
	if (gtlb_index < 0)
		return -ENOENT;

	gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);

	pte->eaddr = eaddr;
	pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
	pte->vpage = eaddr >> PAGE_SHIFT;

	/* XXX read permissions from the guest TLB */
	pte->may_read = true;
	pte->may_write = true;
	pte->may_execute = true;

	return 0;
}

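/*
 * Entry point for the KVM_SET_GUEST_DEBUG vcpu ioctl. Illustrative
 * userspace use for one hardware breakpoint (a sketch; "vcpu_fd" and
 * "bp_addr" are assumed by this example, not defined here):
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 *	dbg.arch.bp[0].addr = bp_addr;
 *	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */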
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;
	int ret = 0;

	vcpu_load(vcpu);

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		goto out;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.dbg_reg.dbcr0 = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints. */
	dbg_reg = &(vcpu->arch.dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1;
	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1.
	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
	 * is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		goto out;

	ret = -EINVAL;
	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			goto out;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				goto out;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				goto out;
		}
	}

	ret = 0;
out:
	vcpu_put(vcpu);
	return ret;
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

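/*
 * Module init: on PR hosts, build the 64KB block of exception handlers
 * that IVPR will point at while a guest runs, laid out at the host's own
 * IVOR offsets so only IVPR needs switching on guest entry.
 */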
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR
	 * holds only the top 16 address bits, so the handlers must live in
	 * a 64KB-aligned region; hence a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		/* Track the handler placed highest in the block, so we know
		 * how far to flush the icache below. Compare the IVOR
		 * offsets themselves, not an offset against a loop index. */
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}