/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					      vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

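/*
 * SPE state is switched lazily: the helpers below load or save the guest
 * SPE registers and keep MSR_SPE in the shadow MSR in sync with what the
 * guest has requested.
 */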
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows
 * we're holding the FPU, and the host can then help to save
 * the guest vcpu FP state if other threads need the FPU.
 * This simulates an FP unavailable fault.
 *
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save guest vcpu FP state into the thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load guest state
 * from the thread into the AltiVec unit.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save guest vcpu AltiVec state into the thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

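/*
 * Interrupt delivery is deferred: queueing only records the priority bit
 * in vcpu->arch.pending_exceptions; kvmppc_core_check_exceptions() tries
 * to deliver it on the next guest entry.
 */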
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
				 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
				    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout. If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

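/*
 * Watchdog timer callback: emulate the guest watchdog by stepping TSR
 * through the enable (ENW) and interrupt (WIS) states up to final expiry,
 * kicking the vcpu whenever an interrupt or userspace exit is needed.
 */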
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

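/*
 * Reflect the current TCR/TSR state into the pending decrementer and
 * watchdog interrupt bits.
 */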
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

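/*
 * Walk the pending exception bitmap in priority order and deliver the
 * first exception the guest can currently take, then publish whether
 * anything is still pending via the shared area.
 */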
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	};

	return r;
}

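/*
 * Process outstanding vcpu->requests before entering the guest. Returns 1
 * if we can go back into the guest, or 0 if a watchdog or EPR exit to
 * userspace is required instead.
 */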
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

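/*
 * Top-level vcpu run path: load guest FP/AltiVec and debug state, enter
 * the guest via __kvmppc_vcpu_run(), then restore the host context.
 */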
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state in stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using the AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

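/*
 * Handle an exit that requires instruction emulation and translate the
 * emulation result into a RESUME_* code, reporting failed emulation to
 * userspace.
 */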
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

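/*
 * Handle a debug exit: if the guest owns the debug resources, reflect the
 * event back into the guest; if userspace is debugging the guest, fill in
 * run->debug and exit to userspace.
 */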
static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	if (vcpu->guest_debug == 0) {
		/*
		 * Debug resources belong to Guest.
		 * Imprecise debug event is not injected
		 */
		if (dbsr & DBSR_IDE) {
			dbsr &= ~DBSR_IDE;
			if (!dbsr)
				return RESUME_GUEST;
		}

		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
			    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
			kvmppc_core_queue_debug(vcpu);

		/* Inject a program interrupt if trap debug is not allowed */
		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
			kvmppc_core_queue_program(vcpu, ESR_PTR);

		return RESUME_GUEST;
	}

	/*
	 * Debug resource owned by userspace.
	 * Clear guest dbsr (vcpu->arch.dbsr)
	 */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.pc;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

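/*
 * Build a minimal pt_regs from the current host context so that host
 * interrupt handlers can be called by hand after a guest exit.
 */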
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (though not exactly) to how it would be called from the low level
 * handler (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
			 __func__, vcpu->arch.pc);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * get last instruction before being preempted
	 * TODO: for e6500 check also BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, false, &last_inst);
		break;
	default:
		break;
	}

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
		goto out;
	}

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#elif defined(CONFIG_SPE_POSSIBLE)
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif /* CONFIG_SPE_POSSIBLE */

/*
 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
 * see kvmppc_core_check_processor_compat().
 */
#ifdef CONFIG_ALTIVEC
	case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

out:
	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
			kvmppc_load_guest_fp(vcpu);
			kvmppc_load_guest_altivec(vcpu);
		}
	}

	return r;
}

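/* Replace TSR wholesale and re-arm the watchdog/timer interrupts to match. */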
static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001381int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
1382{
1383 /* setup watchdog timer once */
1384 spin_lock_init(&vcpu->arch.wdt_lock);
1385 setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
1386 (unsigned long)vcpu);
1387
Bharat Bhushan2f699a52014-08-13 14:39:44 +05301388 /*
1389	 * Clear DBSR.MRR to avoid a guest debug interrupt; the most-recent-reset
1390	 * status it reports is of interest only to the host.
1391 */
1392 mtspr(SPRN_DBSR, DBSR_MRR);
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001393 return 0;
1394}
1395
1396void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
1397{
1398 del_timer_sync(&vcpu->arch.wdt_timer);
1399}
1400
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001401int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1402{
1403 int i;
1404
1405 regs->pc = vcpu->arch.pc;
Alexander Graf992b5b22010-01-08 02:58:02 +01001406 regs->cr = kvmppc_get_cr(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001407 regs->ctr = vcpu->arch.ctr;
1408 regs->lr = vcpu->arch.lr;
Alexander Graf992b5b22010-01-08 02:58:02 +01001409 regs->xer = kvmppc_get_xer(vcpu);
Alexander Graf666e7252010-07-29 14:47:43 +02001410 regs->msr = vcpu->arch.shared->msr;
Bharat Bhushan31579ee2014-07-17 17:01:36 +05301411 regs->srr0 = kvmppc_get_srr0(vcpu);
1412 regs->srr1 = kvmppc_get_srr1(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001413 regs->pid = vcpu->arch.pid;
Bharat Bhushanc1b8a012014-07-17 17:01:39 +05301414 regs->sprg0 = kvmppc_get_sprg0(vcpu);
1415 regs->sprg1 = kvmppc_get_sprg1(vcpu);
1416 regs->sprg2 = kvmppc_get_sprg2(vcpu);
1417 regs->sprg3 = kvmppc_get_sprg3(vcpu);
1418 regs->sprg4 = kvmppc_get_sprg4(vcpu);
1419 regs->sprg5 = kvmppc_get_sprg5(vcpu);
1420 regs->sprg6 = kvmppc_get_sprg6(vcpu);
1421 regs->sprg7 = kvmppc_get_sprg7(vcpu);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001422
1423 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
Alexander Graf8e5b26b2010-01-08 02:58:01 +01001424 regs->gpr[i] = kvmppc_get_gpr(vcpu, i);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001425
1426 return 0;
1427}
1428
1429int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
1430{
1431 int i;
1432
1433 vcpu->arch.pc = regs->pc;
Alexander Graf992b5b22010-01-08 02:58:02 +01001434 kvmppc_set_cr(vcpu, regs->cr);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001435 vcpu->arch.ctr = regs->ctr;
1436 vcpu->arch.lr = regs->lr;
Alexander Graf992b5b22010-01-08 02:58:02 +01001437 kvmppc_set_xer(vcpu, regs->xer);
Hollis Blanchardb8fd68a2008-11-05 09:36:20 -06001438 kvmppc_set_msr(vcpu, regs->msr);
Bharat Bhushan31579ee2014-07-17 17:01:36 +05301439 kvmppc_set_srr0(vcpu, regs->srr0);
1440 kvmppc_set_srr1(vcpu, regs->srr1);
Scott Wood5ce941e2011-04-27 17:24:21 -05001441 kvmppc_set_pid(vcpu, regs->pid);
Bharat Bhushanc1b8a012014-07-17 17:01:39 +05301442 kvmppc_set_sprg0(vcpu, regs->sprg0);
1443 kvmppc_set_sprg1(vcpu, regs->sprg1);
1444 kvmppc_set_sprg2(vcpu, regs->sprg2);
1445 kvmppc_set_sprg3(vcpu, regs->sprg3);
1446 kvmppc_set_sprg4(vcpu, regs->sprg4);
1447 kvmppc_set_sprg5(vcpu, regs->sprg5);
1448 kvmppc_set_sprg6(vcpu, regs->sprg6);
1449 kvmppc_set_sprg7(vcpu, regs->sprg7);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001450
Alexander Graf8e5b26b2010-01-08 02:58:01 +01001451 for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
1452 kvmppc_set_gpr(vcpu, i, regs->gpr[i]);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001453
1454 return 0;
1455}
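
/*
 * A minimal userspace sketch of driving the two ioctls above; vcpu_fd,
 * guest_entry and guest_stack are illustrative names, not taken from this
 * file, and the usual <linux/kvm.h> definitions are assumed:
 *
 *	struct kvm_regs regs;
 *
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.pc = guest_entry;		// where the guest starts executing
 *	regs.gpr[1] = guest_stack;	// initial stack pointer
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 */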
1456
Scott Wood5ce941e2011-04-27 17:24:21 -05001457static void get_sregs_base(struct kvm_vcpu *vcpu,
1458 struct kvm_sregs *sregs)
1459{
1460 u64 tb = get_tb();
1461
1462 sregs->u.e.features |= KVM_SREGS_E_BASE;
1463
1464 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1465 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1466 sregs->u.e.mcsr = vcpu->arch.mcsr;
Bharat Bhushandc168542014-07-17 17:01:38 +05301467 sregs->u.e.esr = kvmppc_get_esr(vcpu);
Bharat Bhushana5414d42014-07-17 17:01:37 +05301468 sregs->u.e.dear = kvmppc_get_dar(vcpu);
Scott Wood5ce941e2011-04-27 17:24:21 -05001469 sregs->u.e.tsr = vcpu->arch.tsr;
1470 sregs->u.e.tcr = vcpu->arch.tcr;
1471 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1472 sregs->u.e.tb = tb;
1473 sregs->u.e.vrsave = vcpu->arch.vrsave;
1474}
1475
1476static int set_sregs_base(struct kvm_vcpu *vcpu,
1477 struct kvm_sregs *sregs)
1478{
1479 if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
1480 return 0;
1481
1482 vcpu->arch.csrr0 = sregs->u.e.csrr0;
1483 vcpu->arch.csrr1 = sregs->u.e.csrr1;
1484 vcpu->arch.mcsr = sregs->u.e.mcsr;
Bharat Bhushandc168542014-07-17 17:01:38 +05301485 kvmppc_set_esr(vcpu, sregs->u.e.esr);
Bharat Bhushana5414d42014-07-17 17:01:37 +05301486 kvmppc_set_dar(vcpu, sregs->u.e.dear);
Scott Wood5ce941e2011-04-27 17:24:21 -05001487 vcpu->arch.vrsave = sregs->u.e.vrsave;
Scott Wooddfd4d472011-11-17 12:39:59 +00001488 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
Scott Wood5ce941e2011-04-27 17:24:21 -05001489
Scott Wooddfd4d472011-11-17 12:39:59 +00001490 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
Scott Wood5ce941e2011-04-27 17:24:21 -05001491 vcpu->arch.dec = sregs->u.e.dec;
Scott Wooddfd4d472011-11-17 12:39:59 +00001492 kvmppc_emulate_dec(vcpu);
1493 }
Scott Wood5ce941e2011-04-27 17:24:21 -05001494
Bharat Bhushand26f22c2013-02-24 18:57:11 +00001495 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
1496 kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
Scott Wood5ce941e2011-04-27 17:24:21 -05001497
1498 return 0;
1499}
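
/*
 * Note that DEC and TSR are only written back when userspace sets the
 * matching KVM_SREGS_E_UPDATE_* flag; otherwise a routine KVM_SET_SREGS
 * call would clobber the guest's running timers.
 */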
1500
1501static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1502 struct kvm_sregs *sregs)
1503{
1504 sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1505
Scott Wood841741f2011-09-02 17:39:37 -05001506 sregs->u.e.pir = vcpu->vcpu_id;
Scott Wood5ce941e2011-04-27 17:24:21 -05001507 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1508 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1509 sregs->u.e.decar = vcpu->arch.decar;
1510 sregs->u.e.ivpr = vcpu->arch.ivpr;
1511}
1512
1513static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1514 struct kvm_sregs *sregs)
1515{
1516 if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1517 return 0;
1518
Scott Wood841741f2011-09-02 17:39:37 -05001519 if (sregs->u.e.pir != vcpu->vcpu_id)
Scott Wood5ce941e2011-04-27 17:24:21 -05001520 return -EINVAL;
1521
1522 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1523 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1524 vcpu->arch.decar = sregs->u.e.decar;
1525 vcpu->arch.ivpr = sregs->u.e.ivpr;
1526
1527 return 0;
1528}
1529
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301530int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
Scott Wood5ce941e2011-04-27 17:24:21 -05001531{
1532 sregs->u.e.features |= KVM_SREGS_E_IVOR;
1533
1534 sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
1535 sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
1536 sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
1537 sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
1538 sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
1539 sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
1540 sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
1541 sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
1542 sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
1543 sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
1544 sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
1545 sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
1546 sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
1547 sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
1548 sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
1549 sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05301550 return 0;
Scott Wood5ce941e2011-04-27 17:24:21 -05001551}
1552
1553int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
1554{
1555 if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
1556 return 0;
1557
1558 vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
1559 vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
1560 vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
1561 vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
1562 vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
1563 vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
1564 vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
1565 vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
1566 vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
1567 vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
1568 vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
1569 vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
1570 vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
1571 vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
1572 vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
1573 vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];
1574
1575 return 0;
1576}
1577
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001578int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
1579 struct kvm_sregs *sregs)
1580{
Scott Wood5ce941e2011-04-27 17:24:21 -05001581 sregs->pvr = vcpu->arch.pvr;
1582
1583 get_sregs_base(vcpu, sregs);
1584 get_sregs_arch206(vcpu, sregs);
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05301585 return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001586}
1587
1588int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
1589 struct kvm_sregs *sregs)
1590{
Scott Wood5ce941e2011-04-27 17:24:21 -05001591 int ret;
1592
1593 if (vcpu->arch.pvr != sregs->pvr)
1594 return -EINVAL;
1595
1596 ret = set_sregs_base(vcpu, sregs);
1597 if (ret < 0)
1598 return ret;
1599
1600 ret = set_sregs_arch206(vcpu, sregs);
1601 if (ret < 0)
1602 return ret;
1603
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05301604 return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001605}
1606
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001607int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
1608 union kvmppc_one_reg *val)
Paul Mackerras31f34382011-12-12 12:26:50 +00001609{
Mihai Caraman35b299e2013-04-11 00:03:07 +00001610 int r = 0;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001611
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001612 switch (id) {
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001613 case KVM_REG_PPC_IAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001614 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001615 break;
Bharat Bhushan547465e2013-07-04 12:27:46 +05301616 case KVM_REG_PPC_IAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001617 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301618 break;
1619#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1620 case KVM_REG_PPC_IAC3:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001621 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301622 break;
1623 case KVM_REG_PPC_IAC4:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001624 *val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301625 break;
1626#endif
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001627 case KVM_REG_PPC_DAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001628 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301629 break;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001630 case KVM_REG_PPC_DAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001631 *val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
Bharat Bhushan2c509672014-08-06 12:08:56 +05301632 break;
Alexander Graf324b3e62013-01-04 18:28:51 +01001633 case KVM_REG_PPC_EPR: {
Bharat Bhushan34f754b2014-07-17 17:01:40 +05301634 u32 epr = kvmppc_get_epr(vcpu);
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001635 *val = get_reg_val(id, epr);
Alexander Graf324b3e62013-01-04 18:28:51 +01001636 break;
1637 }
Mihai Caraman352df1d2012-10-11 06:13:29 +00001638#if defined(CONFIG_64BIT)
1639 case KVM_REG_PPC_EPCR:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001640 *val = get_reg_val(id, vcpu->arch.epcr);
Mihai Caraman352df1d2012-10-11 06:13:29 +00001641 break;
1642#endif
Bharat Bhushan78accda2013-02-24 18:57:12 +00001643 case KVM_REG_PPC_TCR:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001644 *val = get_reg_val(id, vcpu->arch.tcr);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001645 break;
1646 case KVM_REG_PPC_TSR:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001647 *val = get_reg_val(id, vcpu->arch.tsr);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001648 break;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001649 case KVM_REG_PPC_DEBUG_INST:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001650 *val = get_reg_val(id, KVMPPC_INST_EHPRIV_DEBUG);
Bharat Bhushan8c32a2e2013-03-20 20:24:58 +00001651 break;
Paul Mackerras8b75cbb2013-09-20 14:52:37 +10001652 case KVM_REG_PPC_VRSAVE:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001653 *val = get_reg_val(id, vcpu->arch.vrsave);
Bharat Bhushan8c32a2e2013-03-20 20:24:58 +00001654 break;
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001655 default:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001656 r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001657 break;
1658 }
Mihai Caraman35b299e2013-04-11 00:03:07 +00001659
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001660 return r;
Paul Mackerras31f34382011-12-12 12:26:50 +00001661}
1662
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001663int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
1664 union kvmppc_one_reg *val)
Paul Mackerras31f34382011-12-12 12:26:50 +00001665{
Mihai Caraman35b299e2013-04-11 00:03:07 +00001666 int r = 0;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001667
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001668 switch (id) {
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001669 case KVM_REG_PPC_IAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001670 vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001671 break;
Bharat Bhushan547465e2013-07-04 12:27:46 +05301672 case KVM_REG_PPC_IAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001673 vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301674 break;
1675#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1676 case KVM_REG_PPC_IAC3:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001677 vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301678 break;
1679 case KVM_REG_PPC_IAC4:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001680 vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301681 break;
1682#endif
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001683 case KVM_REG_PPC_DAC1:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001684 vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
Bharat Bhushan547465e2013-07-04 12:27:46 +05301685 break;
Mihai Caraman35b299e2013-04-11 00:03:07 +00001686 case KVM_REG_PPC_DAC2:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001687 vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
Bharat Bhushan2c509672014-08-06 12:08:56 +05301688 break;
Alexander Graf324b3e62013-01-04 18:28:51 +01001689 case KVM_REG_PPC_EPR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001690 u32 new_epr = set_reg_val(id, *val);
Mihai Caraman35b299e2013-04-11 00:03:07 +00001691 kvmppc_set_epr(vcpu, new_epr);
Alexander Graf324b3e62013-01-04 18:28:51 +01001692 break;
1693 }
Mihai Caraman352df1d2012-10-11 06:13:29 +00001694#if defined(CONFIG_64BIT)
1695 case KVM_REG_PPC_EPCR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001696 u32 new_epcr = set_reg_val(id, *val);
Mihai Caraman35b299e2013-04-11 00:03:07 +00001697 kvmppc_set_epcr(vcpu, new_epcr);
Mihai Caraman352df1d2012-10-11 06:13:29 +00001698 break;
1699 }
1700#endif
Bharat Bhushan78accda2013-02-24 18:57:12 +00001701 case KVM_REG_PPC_OR_TSR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001702 u32 tsr_bits = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001703 kvmppc_set_tsr_bits(vcpu, tsr_bits);
1704 break;
1705 }
1706 case KVM_REG_PPC_CLEAR_TSR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001707 u32 tsr_bits = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001708 kvmppc_clr_tsr_bits(vcpu, tsr_bits);
1709 break;
1710 }
1711 case KVM_REG_PPC_TSR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001712 u32 tsr = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001713 kvmppc_set_tsr(vcpu, tsr);
1714 break;
1715 }
1716 case KVM_REG_PPC_TCR: {
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001717 u32 tcr = set_reg_val(id, *val);
Bharat Bhushan78accda2013-02-24 18:57:12 +00001718 kvmppc_set_tcr(vcpu, tcr);
1719 break;
1720 }
Paul Mackerras8b75cbb2013-09-20 14:52:37 +10001721 case KVM_REG_PPC_VRSAVE:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001722 vcpu->arch.vrsave = set_reg_val(id, *val);
Paul Mackerras8b75cbb2013-09-20 14:52:37 +10001723 break;
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001724 default:
Mihai Caraman8a41ea52014-08-20 16:36:24 +03001725 r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001726 break;
1727 }
Mihai Caraman35b299e2013-04-11 00:03:07 +00001728
Bharat Bhushan6df8d3f2012-08-08 21:17:55 +00001729 return r;
Paul Mackerras31f34382011-12-12 12:26:50 +00001730}
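
/*
 * A minimal userspace sketch of the ONE_REG interface handled above;
 * vcpu_fd is an illustrative name and <linux/kvm.h> is assumed:
 *
 *	__u32 tsr = 0;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_TSR,
 *		.addr = (__u64)(uintptr_t)&tsr,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg);	// read the timer status register
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);	// write it back unchanged
 */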
1731
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001732int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1733{
1734 return -ENOTSUPP;
1735}
1736
1737int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
1738{
1739 return -ENOTSUPP;
1740}
1741
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001742int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
1743 struct kvm_translation *tr)
1744{
Avi Kivity98001d82010-05-13 11:05:49 +03001745 int r;
1746
Avi Kivity98001d82010-05-13 11:05:49 +03001747 r = kvmppc_core_vcpu_translate(vcpu, tr);
Avi Kivity98001d82010-05-13 11:05:49 +03001748 return r;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001749}
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06001750
Alexander Graf4e755752009-10-30 05:47:01 +00001751int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
1752{
1753 return -ENOTSUPP;
1754}
1755
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05301756void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001757 struct kvm_memory_slot *dont)
1758{
1759}
1760
Aneesh Kumar K.V55870272013-10-07 22:18:00 +05301761int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001762 unsigned long npages)
1763{
1764 return 0;
1765}
1766
Paul Mackerrasf9e05542011-06-29 00:19:22 +00001767int kvmppc_core_prepare_memory_region(struct kvm *kvm,
Paul Mackerrasa66b48c2012-09-11 13:27:46 +00001768 struct kvm_memory_slot *memslot,
Paul Mackerrasf9e05542011-06-29 00:19:22 +00001769 struct kvm_userspace_memory_region *mem)
1770{
1771 return 0;
1772}
1773
1774void kvmppc_core_commit_memory_region(struct kvm *kvm,
Paul Mackerrasdfe49db2012-09-11 13:28:18 +00001775 struct kvm_userspace_memory_region *mem,
Takuya Yoshikawa84826442013-02-27 19:45:25 +09001776 const struct kvm_memory_slot *old)
Paul Mackerrasdfe49db2012-09-11 13:28:18 +00001777{
1778}
1779
1780void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
Paul Mackerrasf9e05542011-06-29 00:19:22 +00001781{
1782}
1783
Mihai Caraman38f98822012-10-11 06:13:27 +00001784void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
1785{
1786#if defined(CONFIG_64BIT)
1787 vcpu->arch.epcr = new_epcr;
1788#ifdef CONFIG_KVM_BOOKE_HV
1789 vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
1790 if (vcpu->arch.epcr & SPRN_EPCR_ICM)
1791 vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
1792#endif
1793#endif
1794}
1795
Scott Wooddfd4d472011-11-17 12:39:59 +00001796void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
1797{
1798 vcpu->arch.tcr = new_tcr;
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001799 arm_next_watchdog(vcpu);
Scott Wooddfd4d472011-11-17 12:39:59 +00001800 update_timer_ints(vcpu);
1801}
1802
1803void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1804{
1805 set_bits(tsr_bits, &vcpu->arch.tsr);
1806 smp_wmb();
1807 kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
1808 kvm_vcpu_kick(vcpu);
1809}
1810
1811void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
1812{
1813 clear_bits(tsr_bits, &vcpu->arch.tsr);
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001814
1815 /*
1816 * We may have stopped the watchdog due to
1817 * being stuck on final expiration.
1818 */
1819 if (tsr_bits & (TSR_ENW | TSR_WIS))
1820 arm_next_watchdog(vcpu);
1821
Scott Wooddfd4d472011-11-17 12:39:59 +00001822 update_timer_ints(vcpu);
1823}
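
/*
 * Setting TSR bits also kicks the vcpu (KVM_REQ_PENDING_TIMER plus
 * kvm_vcpu_kick()) so a sleeping or running guest notices the new timer
 * state promptly; clearing bits only needs to re-arm the watchdog and
 * recompute the pending interrupts.
 */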
1824
1825void kvmppc_decrementer_func(unsigned long data)
1826{
1827 struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
1828
Bharat Bhushan21bd0002012-05-20 23:21:23 +00001829 if (vcpu->arch.tcr & TCR_ARE) {
1830 vcpu->arch.dec = vcpu->arch.decar;
1831 kvmppc_emulate_dec(vcpu);
1832 }
1833
Scott Wooddfd4d472011-11-17 12:39:59 +00001834 kvmppc_set_tsr_bits(vcpu, TSR_DIS);
1835}
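
/*
 * With auto-reload enabled (TCR[ARE]) the decrementer is reloaded from
 * DECAR on every expiry before TSR[DIS] is raised, giving the guest a
 * periodic timer rather than a one-shot one.
 */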
1836
Bharat Bhushance11e482013-07-04 12:27:47 +05301837static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
1838 uint64_t addr, int index)
1839{
1840 switch (index) {
1841 case 0:
1842 dbg_reg->dbcr0 |= DBCR0_IAC1;
1843 dbg_reg->iac1 = addr;
1844 break;
1845 case 1:
1846 dbg_reg->dbcr0 |= DBCR0_IAC2;
1847 dbg_reg->iac2 = addr;
1848 break;
1849#if CONFIG_PPC_ADV_DEBUG_IACS > 2
1850 case 2:
1851 dbg_reg->dbcr0 |= DBCR0_IAC3;
1852 dbg_reg->iac3 = addr;
1853 break;
1854 case 3:
1855 dbg_reg->dbcr0 |= DBCR0_IAC4;
1856 dbg_reg->iac4 = addr;
1857 break;
1858#endif
1859 default:
1860 return -EINVAL;
1861 }
1862
1863 dbg_reg->dbcr0 |= DBCR0_IDM;
1864 return 0;
1865}
1866
1867static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
1868 int type, int index)
1869{
1870 switch (index) {
1871 case 0:
1872 if (type & KVMPPC_DEBUG_WATCH_READ)
1873 dbg_reg->dbcr0 |= DBCR0_DAC1R;
1874 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1875 dbg_reg->dbcr0 |= DBCR0_DAC1W;
1876 dbg_reg->dac1 = addr;
1877 break;
1878 case 1:
1879 if (type & KVMPPC_DEBUG_WATCH_READ)
1880 dbg_reg->dbcr0 |= DBCR0_DAC2R;
1881 if (type & KVMPPC_DEBUG_WATCH_WRITE)
1882 dbg_reg->dbcr0 |= DBCR0_DAC2W;
1883 dbg_reg->dac2 = addr;
1884 break;
1885 default:
1886 return -EINVAL;
1887 }
1888
1889 dbg_reg->dbcr0 |= DBCR0_IDM;
1890 return 0;
1891}
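
/*
 * Both helpers above claim one IAC (instruction address compare) or DAC
 * (data address compare) slot and set DBCR0[IDM] so the event is raised
 * in internal debug mode; the caller hands out slot indices in the order
 * the breakpoints and watchpoints appear in the debug ioctl.
 */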
1892void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
1893{
1894 /* XXX: Add similar MSR protection for BookE-PR */
1895#ifdef CONFIG_KVM_BOOKE_HV
1896 BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
1897 if (set) {
1898 if (prot_bitmap & MSR_UCLE)
1899 vcpu->arch.shadow_msrp |= MSRP_UCLEP;
1900 if (prot_bitmap & MSR_DE)
1901 vcpu->arch.shadow_msrp |= MSRP_DEP;
1902 if (prot_bitmap & MSR_PMM)
1903 vcpu->arch.shadow_msrp |= MSRP_PMMP;
1904 } else {
1905 if (prot_bitmap & MSR_UCLE)
1906 vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
1907 if (prot_bitmap & MSR_DE)
1908 vcpu->arch.shadow_msrp &= ~MSRP_DEP;
1909 if (prot_bitmap & MSR_PMM)
1910 vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
1911 }
1912#endif
1913}
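
/*
 * On BookE-HV the MSRP (MSR protect) bits set here are meant to keep the
 * guest from changing the corresponding MSR bits (UCLE, DE, PMM) behind
 * the host's back; BookE-PR has no equivalent yet, hence the XXX above.
 */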
1914
Alexander Graf7d15c062014-06-20 13:52:36 +02001915int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
1916 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
1917{
1918 int gtlb_index;
1919 gpa_t gpaddr;
1920
1921#ifdef CONFIG_KVM_E500V2
1922 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1923 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1924 pte->eaddr = eaddr;
1925 pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
1926 (eaddr & ~PAGE_MASK);
1927 pte->vpage = eaddr >> PAGE_SHIFT;
1928 pte->may_read = true;
1929 pte->may_write = true;
1930 pte->may_execute = true;
1931
1932 return 0;
1933 }
1934#endif
1935
1936 /* Check the guest TLB. */
1937 switch (xlid) {
1938 case XLATE_INST:
1939 gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
1940 break;
1941 case XLATE_DATA:
1942 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
1943 break;
1944 default:
1945 BUG();
1946 }
1947
1948 /* Do we have a TLB entry at all? */
1949 if (gtlb_index < 0)
1950 return -ENOENT;
1951
1952 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
1953
1954 pte->eaddr = eaddr;
1955 pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
1956 pte->vpage = eaddr >> PAGE_SHIFT;
1957
1958 /* XXX read permissions from the guest TLB */
1959 pte->may_read = true;
1960 pte->may_write = true;
1961 pte->may_execute = true;
1962
1963 return 0;
1964}
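
/*
 * The lookup above consults only the guest-visible TLB and so returns a
 * guest physical address; on e500v2 the paravirt magic page is
 * special-cased before that lookup. Read/write/execute permissions are
 * not yet derived from the guest TLB entry (see the XXX above).
 */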
1965
Bharat Bhushance11e482013-07-04 12:27:47 +05301966int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
1967 struct kvm_guest_debug *dbg)
1968{
1969 struct debug_reg *dbg_reg;
1970 int n, b = 0, w = 0;
1971
1972 if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
Bharat Bhushan348ba712014-08-06 12:08:55 +05301973 vcpu->arch.dbg_reg.dbcr0 = 0;
Bharat Bhushance11e482013-07-04 12:27:47 +05301974 vcpu->guest_debug = 0;
1975 kvm_guest_protect_msr(vcpu, MSR_DE, false);
1976 return 0;
1977 }
1978
1979 kvm_guest_protect_msr(vcpu, MSR_DE, true);
1980 vcpu->guest_debug = dbg->control;
Bharat Bhushan348ba712014-08-06 12:08:55 +05301981 vcpu->arch.dbg_reg.dbcr0 = 0;
Bharat Bhushance11e482013-07-04 12:27:47 +05301982
1983 if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
Bharat Bhushan348ba712014-08-06 12:08:55 +05301984 vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;
Bharat Bhushance11e482013-07-04 12:27:47 +05301985
1986 /* Code below handles only HW breakpoints */
Bharat Bhushan348ba712014-08-06 12:08:55 +05301987 dbg_reg = &(vcpu->arch.dbg_reg);
Bharat Bhushance11e482013-07-04 12:27:47 +05301988
1989#ifdef CONFIG_KVM_BOOKE_HV
1990 /*
1991	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1;
1992	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0.
1993 */
1994 dbg_reg->dbcr1 = 0;
1995 dbg_reg->dbcr2 = 0;
1996#else
1997 /*
1998	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1.
1999 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
2000 * is set.
2001 */
2002 dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
2003 DBCR1_IAC4US;
2004 dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
2005#endif
2006
2007 if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
2008 return 0;
2009
2010 for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
2011 uint64_t addr = dbg->arch.bp[n].addr;
2012 uint32_t type = dbg->arch.bp[n].type;
2013
2014 if (type == KVMPPC_DEBUG_NONE)
2015 continue;
2016
2017		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
2018 KVMPPC_DEBUG_WATCH_WRITE |
2019 KVMPPC_DEBUG_BREAKPOINT))
2020 return -EINVAL;
2021
2022 if (type & KVMPPC_DEBUG_BREAKPOINT) {
2023 /* Setting H/W breakpoint */
2024 if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
2025 return -EINVAL;
2026 } else {
2027 /* Setting H/W watchpoint */
2028 if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
2029 type, w++))
2030 return -EINVAL;
2031 }
2032 }
2033
2034 return 0;
2035}
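
/*
 * A minimal userspace sketch for installing one hardware instruction
 * breakpoint through the ioctl above; vcpu_fd and bp_addr are
 * illustrative names and <linux/kvm.h> is assumed:
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 *	dbg.arch.bp[0].addr = bp_addr;
 *	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */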
2036
Scott Wood94fa9d92011-12-20 15:34:22 +00002037void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2038{
Paul Mackerrasa47d72f2012-09-20 19:35:51 +00002039 vcpu->cpu = smp_processor_id();
Scott Woodd30f6e42011-12-20 15:34:43 +00002040 current->thread.kvm_vcpu = vcpu;
Scott Wood94fa9d92011-12-20 15:34:22 +00002041}
2042
2043void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
2044{
Scott Woodd30f6e42011-12-20 15:34:43 +00002045 current->thread.kvm_vcpu = NULL;
Paul Mackerrasa47d72f2012-09-20 19:35:51 +00002046 vcpu->cpu = -1;
Bharat Bhushance11e482013-07-04 12:27:47 +05302047
2048 /* Clear pending debug event in DBSR */
2049 kvmppc_clear_dbsr();
Scott Wood94fa9d92011-12-20 15:34:22 +00002050}
2051
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302052void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
2053{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302054 vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302055}
2056
2057int kvmppc_core_init_vm(struct kvm *kvm)
2058{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302059 return kvm->arch.kvm_ops->init_vm(kvm);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302060}
2061
2062struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
2063{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302064 return kvm->arch.kvm_ops->vcpu_create(kvm, id);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302065}
2066
2067void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
2068{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302069 vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302070}
2071
2072void kvmppc_core_destroy_vm(struct kvm *kvm)
2073{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302074 kvm->arch.kvm_ops->destroy_vm(kvm);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302075}
2076
2077void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
2078{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302079 vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
Aneesh Kumar K.V3a167bea2013-10-07 22:17:53 +05302080}
2081
2082void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
2083{
Aneesh Kumar K.Vcbbc58d2013-10-07 22:18:01 +05302084 vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002085}
2086
2087int __init kvmppc_booke_init(void)
2088{
Scott Woodd30f6e42011-12-20 15:34:43 +00002089#ifndef CONFIG_KVM_BOOKE_HV
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002090 unsigned long ivor[16];
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002091 unsigned long *handler = kvmppc_booke_handler_addr;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002092 unsigned long max_ivor = 0;
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002093 unsigned long handler_len;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002094 int i;
2095
2096	/* We install our own exception handlers by hijacking IVPR. IVPR holds only the
2097	 * upper 16 bits of the vector base, so we need a 64KB-aligned 64KB allocation. */
2098 kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
2099 VCPU_SIZE_ORDER);
2100 if (!kvmppc_booke_handlers)
2101 return -ENOMEM;
2102
2103 /* XXX make sure our handlers are smaller than Linux's */
2104
2105 /* Copy our interrupt handlers to match host IVORs. That way we don't
2106 * have to swap the IVORs on every guest/host transition. */
2107 ivor[0] = mfspr(SPRN_IVOR0);
2108 ivor[1] = mfspr(SPRN_IVOR1);
2109 ivor[2] = mfspr(SPRN_IVOR2);
2110 ivor[3] = mfspr(SPRN_IVOR3);
2111 ivor[4] = mfspr(SPRN_IVOR4);
2112 ivor[5] = mfspr(SPRN_IVOR5);
2113 ivor[6] = mfspr(SPRN_IVOR6);
2114 ivor[7] = mfspr(SPRN_IVOR7);
2115 ivor[8] = mfspr(SPRN_IVOR8);
2116 ivor[9] = mfspr(SPRN_IVOR9);
2117 ivor[10] = mfspr(SPRN_IVOR10);
2118 ivor[11] = mfspr(SPRN_IVOR11);
2119 ivor[12] = mfspr(SPRN_IVOR12);
2120 ivor[13] = mfspr(SPRN_IVOR13);
2121 ivor[14] = mfspr(SPRN_IVOR14);
2122 ivor[15] = mfspr(SPRN_IVOR15);
2123
2124 for (i = 0; i < 16; i++) {
2125		if (ivor[i] > ivor[max_ivor])
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002126 max_ivor = i;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002127
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002128 handler_len = handler[i + 1] - handler[i];
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002129 memcpy((void *)kvmppc_booke_handlers + ivor[i],
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002130 (void *)handler[i], handler_len);
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002131 }
Bharat Bhushan1d542d92013-01-15 22:24:39 +00002132
2133 handler_len = handler[max_ivor + 1] - handler[max_ivor];
2134 flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
2135 ivor[max_ivor] + handler_len);
Scott Woodd30f6e42011-12-20 15:34:43 +00002136#endif /* !BOOKE_HV */
Hollis Blancharddb93f572008-11-05 09:36:18 -06002137 return 0;
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002138}
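
/*
 * The init path above keeps the host's IVOR layout: each KVM handler is
 * copied to the same offset inside the 64KB kvmppc_booke_handlers block
 * that the corresponding host handler uses, so guest entry/exit only has
 * to switch IVPR instead of rewriting all sixteen IVORs.
 */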
2139
Hollis Blancharddb93f572008-11-05 09:36:18 -06002140void __exit kvmppc_booke_exit(void)
Hollis Blanchardd9fbd032008-11-05 09:36:13 -06002141{
2142 free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
2143 kvm_exit();
2144}