/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};
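
/*
 * A note on the table above (explanatory, not from the source): each
 * entry is exported by the generic KVM debugfs support as a per-VM or
 * per-vcpu counter file, named by the string and backed by the given
 * offset into struct kvm or struct kvm_vcpu.  The files usually appear
 * under the kvm debugfs directory (commonly /sys/kernel/debug/kvm/,
 * depending on where debugfs is mounted).
 */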

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows
 * we're holding the FPU, and the host can then help to save
 * the guest vcpu FP state if other threads need the FPU.
 * This simulates an FP unavailable fault.
 *
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save guest vcpu FP state into the thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load guest state
 * from the thread to the AltiVec unit.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save guest vcpu AltiVec state into the thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
				 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
				    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}
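
/*
 * A short map of the save/restore pairs used above (as implied by the
 * Book E interrupt classes handled in this file): non-critical
 * interrupts use SRR0/SRR1, critical interrupts use CSRR0/CSRR1, debug
 * interrupts use DSRR0/DSRR1 on cores with CPU_FTR_DEBUG_LVL_EXC (and
 * otherwise fall back to the critical pair), and machine checks use
 * MCSRR0/MCSRR1.
 */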

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
523
Bharat Bhushanf61c94b2012-08-08 20:38:19 +0000524/*
525 * Return the number of jiffies until the next timeout. If the timeout is
526 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
527 * because the larger value can break the timer APIs.
528 */
529static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
530{
531 u64 tb, wdt_tb, wdt_ticks = 0;
532 u64 nr_jiffies = 0;
533 u32 period = TCR_GET_WP(vcpu->arch.tcr);
534
535 wdt_tb = 1ULL << (63 - period);
536 tb = get_tb();
537 /*
538 * The watchdog timeout will hapeen when TB bit corresponding
539 * to watchdog will toggle from 0 to 1.
540 */
541 if (tb & wdt_tb)
542 wdt_ticks = wdt_tb;
543
544 wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));
545
546 /* Convert timebase ticks to jiffies */
547 nr_jiffies = wdt_ticks;
548
549 if (do_div(nr_jiffies, tb_ticks_per_jiffy))
550 nr_jiffies++;
551
552 return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
553}
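
/*
 * Worked example for the arithmetic above (illustrative numbers, not
 * from the source): suppose 63 - period == 35, so wdt_tb == 2^35 ==
 * 0x8_0000_0000, and the timebase reads exactly tb == 0x8_0000_0000,
 * i.e. the selected bit has just toggled to 1.  The next 0->1 toggle
 * is then a full toggle cycle away:
 *
 *	wdt_ticks  = wdt_tb                         (bit currently 1)
 *	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1))   (here: wdt_tb - 0)
 *	           = 2 * 2^35 == 0x10_0000_0000 timebase ticks,
 *
 * which is then converted to jiffies, rounding up.
 */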

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then there is no need to exit
	 * to userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the watchdog timeout is NEXT_TIMER_MAX_DELTA jiffies or more,
	 * do not run the watchdog timer, as that can break the timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is the final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after the final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}
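
/*
 * Sketch of the TSR watchdog state machine driven above (as read from
 * the code, not an authoritative spec): each timer expiry advances
 *
 *	ENW=0,WIS=0  ->  ENW=1,WIS=0	(first expiry, watchdog armed)
 *	ENW=1,WIS=0  ->  ENW=1,WIS=1	(second expiry, interrupt pending)
 *	ENW=1,WIS=1  ->  final		(third expiry, TCR[WRC] action)
 *
 * The cmpxchg loop makes the transition atomic against concurrent TSR
 * updates from the guest or from userspace.
 */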

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state on the stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state on the stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	/* Clear guest dbsr (vcpu->arch.dbsr) */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.pc;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}
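
/*
 * Note on the inline asm above (explanatory, not from the source): LR is
 * sampled with mflr *before* the "bl 1f; 1: mflr %0" sequence, since that
 * branch-and-link clobbers LR.  The branch targets the very next
 * instruction, so the mflr that follows yields the current instruction
 * address, which serves as a plausible NIP for the synthesized pt_regs.
 */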

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (though not exactly) as it would be called from the low level handler
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
			 __func__, vcpu->arch.pc);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * Get the last instruction before being preempted.
	 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, false, &last_inst);
		break;
	default:
		break;
	}

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
		goto out;
	}

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#elif defined(CONFIG_SPE_POSSIBLE)
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif /* CONFIG_SPE_POSSIBLE */

/*
 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
 * see kvmppc_core_check_processor_compat().
 */
#ifdef CONFIG_ALTIVEC
	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

out:
	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
			kvmppc_load_guest_fp(vcpu);
			kvmppc_load_guest_altivec(vcpu);
		}
	}

	return r;
}
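
/*
 * A note on the RESUME_* encoding used above (a sketch of my reading;
 * the authoritative values live in the kvm headers): RESUME_GUEST is 0,
 * RESUME_FLAG_NV requests a reload of non-volatile guest registers, and
 * RESUME_FLAG_HOST requests a return to userspace.  So the combination
 * (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV) packs an error code
 * above the flag bits, matching the "(errcode<<2 | RESUME_FLAG_HOST |
 * RESUME_FLAG_NV)" form documented at kvmppc_handle_exit().
 */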

static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}
1345
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001346int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
1347{
1348 /* setup watchdog timer once */
1349 spin_lock_init(&vcpu->arch.wdt_lock);
1350 setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
1351 (unsigned long)vcpu);
1352
1353 return 0;
1354}
1355
1356void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
1357{
1358 del_timer_sync(&vcpu->arch.wdt_timer);
1359}
1360
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001361int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1362{
1363 int i;
1364
1365 regs->pc = vcpu->arch.pc;
Alexander Graf992b5b22010-01-08 02:58:02 +01001366 regs->cr = kvmppc_get_cr(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001367 regs->ctr = vcpu->arch.ctr;
1368 regs->lr = vcpu->arch.lr;
Alexander Graf992b5b22010-01-08 02:58:02 +01001369 regs->xer = kvmppc_get_xer(vcpu);
Alexander Graf666e7252010-07-29 14:47:43 +02001370 regs->msr = vcpu->arch.shared->msr;
Bharat Bhushan31579ee2014-07-17 17:01:36 +05301371 regs->srr0 = kvmppc_get_srr0(vcpu);
1372 regs->srr1 = kvmppc_get_srr1(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001373 regs->pid = vcpu->arch.pid;
Bharat Bhushanc1b8a012014-07-17 17:01:39 +05301374 regs->sprg0 = kvmppc_get_sprg0(vcpu);
1375 regs->sprg1 = kvmppc_get_sprg1(vcpu);
1376 regs->sprg2 = kvmppc_get_sprg2(vcpu);
1377 regs->sprg3 = kvmppc_get_sprg3(vcpu);
1378 regs->sprg4 = kvmppc_get_sprg4(vcpu);
1379 regs->sprg5 = kvmppc_get_sprg5(vcpu);
1380 regs->sprg6 = kvmppc_get_sprg6(vcpu);
1381 regs->sprg7 = kvmppc_get_sprg7(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001382
1383 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
Alexander Graf8e5b26b2010-01-08 02:58:01 +01001384 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001385
1386 return 0;
1387}
1388
1389int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1390{
1391 int i;
1392
1393 vcpu->arch.pc = regs->pc;
Alexander Graf992b5b22010-01-08 02:58:02 +01001394 kvmppc_set_cr(vcpu, regs->cr);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001395 vcpu->arch.ctr = regs->ctr;
1396 vcpu->arch.lr = regs->lr;
Alexander Graf992b5b22010-01-08 02:58:02 +01001397 kvmppc_set_xer(vcpu, regs->xer);
Hollis Blanchardb8fd68a2008-11-05 09:36:20 -06001398 kvmppc_set_msr(vcpu, regs->msr);
Bharat Bhushan31579ee2014-07-17 17:01:36 +05301399 kvmppc_set_srr0(vcpu, regs->srr0);
1400 kvmppc_set_srr1(vcpu, regs->srr1);
Scott Wood5ce941e2011-04-27 17:24:21 -05001401 kvmppc_set_pid(vcpu, regs->pid);
Bharat Bhushanc1b8a012014-07-17 17:01:39 +05301402 kvmppc_set_sprg0(vcpu, regs->sprg0);
1403 kvmppc_set_sprg1(vcpu, regs->sprg1);
1404 kvmppc_set_sprg2(vcpu, regs->sprg2);
1405 kvmppc_set_sprg3(vcpu, regs->sprg3);
1406 kvmppc_set_sprg4(vcpu, regs->sprg4);
1407 kvmppc_set_sprg5(vcpu, regs->sprg5);
1408 kvmppc_set_sprg6(vcpu, regs->sprg6);
1409 kvmppc_set_sprg7(vcpu, regs->sprg7);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001410
Alexander Graf8e5b26b2010-01-08 02:58:01 +01001411 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1412 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001413
1414 return 0;
1415}
1416
Scott Wood5ce941e2011-04-27 17:24:21 -05001417static void get_sregs_base(struct kvm_vcpu *vcpu,
1418 struct kvm_sregs *sregs)
1419{
1420 u64 tb = get_tb();
1421
1422 sregs->u.e.features |= KVM_SREGS_E_BASE;
1423
1424 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1425 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1426 sregs->u.e.mcsr = vcpu->arch.mcsr;
Bharat Bhushandc168542014-07-17 17:01:38 +05301427 sregs->u.e.esr = kvmppc_get_esr(vcpu);
Bharat Bhushana5414d42014-07-17 17:01:37 +05301428 sregs->u.e.dear = kvmppc_get_dar(vcpu);
Scott Wood5ce941e2011-04-27 17:24:21 -05001429 sregs->u.e.tsr = vcpu->arch.tsr;
1430 sregs->u.e.tcr = vcpu->arch.tcr;
1431 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1432 sregs->u.e.tb = tb;
1433 sregs->u.e.vrsave = vcpu->arch.vrsave;
1434}
1435
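/*
 * Only fields whose feature flag is set in sregs are consumed, and DEC
 * and TSR are additionally gated by update_special, so userspace can
 * restore the other base registers without clobbering running timers.
 */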
static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	kvmppc_set_esr(vcpu, sregs->u.e.esr);
	kvmppc_set_dar(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
	return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

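/*
 * ONE_REG accessors for booke-wide registers; any id not handled here
 * is forwarded to the core-specific kvm_ops implementation.  Userspace
 * reaches these through the KVM_GET_ONE_REG/KVM_SET_ONE_REG vcpu
 * ioctls, roughly like this (hypothetical userspace sketch; the buffer
 * width must match the KVM_REG_SIZE encoded in the id):
 *
 *	uint32_t tcr;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_PPC_TCR,
 *		.addr = (uintptr_t)&tcr,
 *	};
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);
 */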
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		u32 epr = kvmppc_get_epr(vcpu);
		*val = get_reg_val(id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		*val = get_reg_val(id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		*val = get_reg_val(id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		*val = get_reg_val(id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_EHPRIV_DEBUG);
		break;
	case KVM_REG_PPC_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(id, *val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(id, *val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(id, *val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(id, *val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(id, *val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

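/*
 * EPCR[ICM] selects 64-bit interrupt handling for the guest; on HV
 * hosts the shadow EPCR's GICM bit must track it so hardware takes
 * guest interrupts in the mode the guest expects.
 */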
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

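/*
 * Host-side decrementer expiry on behalf of the guest: with auto-reload
 * enabled (TCR[ARE]) the DEC is reloaded from DECAR and keeps ticking;
 * either way TSR[DIS] is set so update_timer_ints() can raise the
 * guest's decrementer interrupt.
 */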
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

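/*
 * Program one of the instruction address compare (IAC) registers and
 * enable it in DBCR0.  The caller hands out slots in order, so asking
 * for more breakpoints than the core has IACs fails with -EINVAL.
 */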
static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

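/*
 * On HV cores, setting a bit in shadow MSRP makes the corresponding
 * guest MSR bit privileged, so the host stays in control of UCLE, DE
 * and PMM while the guest runs.
 */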
void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}

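/*
 * Translate a guest effective address through the guest TLB.  On e500v2
 * the KVM magic (shared) page is special-cased first, bypassing the TLB
 * search entirely for supervisor-mode accesses to that address.
 */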
int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	int gtlb_index;
	gpa_t gpaddr;

#ifdef CONFIG_KVM_E500V2
	if (!(vcpu->arch.shared->msr & MSR_PR) &&
	    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
		pte->eaddr = eaddr;
		pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
			     (eaddr & ~PAGE_MASK);
		pte->vpage = eaddr >> PAGE_SHIFT;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;

		return 0;
	}
#endif

	/* Check the guest TLB. */
	switch (xlid) {
	case XLATE_INST:
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		break;
	case XLATE_DATA:
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		break;
	default:
		BUG();
	}

	/* Do we have a TLB entry at all? */
	if (gtlb_index < 0)
		return -ENOENT;

	gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);

	pte->eaddr = eaddr;
	pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
	pte->vpage = eaddr >> PAGE_SHIFT;

	/* XXX read permissions from the guest TLB */
	pte->may_read = true;
	pte->may_write = true;
	pte->may_execute = true;

	return 0;
}

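/*
 * Wire single-step and hardware debug state from userspace into the
 * shadow debug registers.  MSR[DE] is protected while debugging is
 * active so the guest cannot switch debug events off behind the
 * debugger's back.
 */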
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		return 0;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.dbg_reg.dbcr0 = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
	 * is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		return 0;

	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		/* Bitwise ~, not logical !, is needed here: !(mask) is
		 * always 0, which would let unknown type bits through. */
		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			return -EINVAL;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				return -EINVAL;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				return -EINVAL;
		}
	}

	return 0;
}

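/*
 * Per-CPU load/put: record which physical CPU the vcpu runs on and
 * stash the vcpu pointer where the exception handlers can find it; on
 * put, also clear any debug event the guest left pending in DBSR.
 */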
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

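/*
 * Thin wrappers: everything below dispatches to the subarch (e500 or
 * e500mc) implementation selected when the VM was created.
 */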
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

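/*
 * Module init for non-HV booke: allocate a block for KVM's own
 * exception handlers and lay them out at the same offsets as the host
 * IVORs, so guest entry/exit never has to rewrite the IVORs.
 */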
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR only
	 * holds the upper 16 bits of the handler base address, so the
	 * handlers need a 64KB-aligned 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		/* Compare offsets, not an offset against the stored index,
		 * so the final icache flush covers the last handler. */
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}