/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio", VCPU_STAT(mmio_exits) },
        { "dcr", VCPU_STAT(dcr_exits) },
        { "sig", VCPU_STAT(signal_exits) },
        { "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc", VCPU_STAT(syscall_exits) },
        { "isi", VCPU_STAT(isi_exits) },
        { "dsi", VCPU_STAT(dsi_exits) },
        { "inst_emu", VCPU_STAT(emulated_inst_exits) },
        { "dec", VCPU_STAT(dec_exits) },
        { "ext_intr", VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "doorbell", VCPU_STAT(dbell_exits) },
        { "guest doorbell", VCPU_STAT(gdbell_exits) },
        { "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
        { NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
        printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
                                              vcpu->arch.shared->srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_save_guest_spe(vcpu);
        vcpu->arch.shadow_msr &= ~MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_load_guest_spe(vcpu);
        vcpu->arch.shadow_msr |= MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.shared->msr & MSR_SPE) {
                if (!(vcpu->arch.shadow_msr & MSR_SPE))
                        kvmppc_vcpu_enable_spe(vcpu);
        } else if (vcpu->arch.shadow_msr & MSR_SPE) {
                kvmppc_vcpu_disable_spe(vcpu);
        }
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
        /* We always treat the FP bit as enabled from the host
           perspective, so only need to adjust the shadow MSR */
        vcpu->arch.shadow_msr &= ~MSR_FP;
        vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
        /* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
        vcpu->arch.shadow_msr &= ~MSR_DE;
        vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

        /* Force enable debug interrupts when user space wants to debug */
        if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
                /*
                 * Since there is no shadow MSR, sync MSR_DE into the guest
                 * visible MSR.
                 */
                vcpu->arch.shared->msr |= MSR_DE;
#else
                vcpu->arch.shadow_msr |= MSR_DE;
                vcpu->arch.shared->msr &= ~MSR_DE;
#endif
        }
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
        u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
        new_msr |= MSR_GS;
#endif

        vcpu->arch.shared->msr = new_msr;

        kvmppc_mmu_msr_notify(vcpu, old_msr);
        kvmppc_vcpu_sync_spe(vcpu);
        kvmppc_vcpu_sync_fpu(vcpu);
        kvmppc_vcpu_sync_debug(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        trace_kvm_booke_queue_irqprio(vcpu, priority);
        set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

        kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        kvmppc_set_srr0(vcpu, srr0);
        kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.csrr0 = srr0;
        vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
                vcpu->arch.dsrr0 = srr0;
                vcpu->arch.dsrr1 = srr1;
        } else {
                set_guest_csrr(vcpu, srr0, srr1);
        }
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.mcsrr0 = srr0;
        vcpu->arch.mcsrr1 = srr1;
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong msr_mask = 0;
        bool update_esr = false, update_dear = false, update_epr = false;
        ulong crit_raw = vcpu->arch.shared->critical;
        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
        bool crit;
        bool keep_irq = false;
        enum int_class int_class;
        ulong new_msr = vcpu->arch.shared->msr;

        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

        if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
                priority = BOOKE_IRQPRIO_EXTERNAL;
                keep_irq = true;
        }

        if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
                update_epr = true;

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
        case BOOKE_IRQPRIO_ALIGNMENT:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
                allowed = 1;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_WATCHDOG:
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_DBELL_CRIT:
                allowed = vcpu->arch.shared->msr & MSR_CE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.shared->msr & MSR_ME;
                allowed = allowed && !crit;
                int_class = INT_CLASS_MC;
                break;
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                keep_irq = true;
                /* fall through */
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DBELL:
                allowed = vcpu->arch.shared->msr & MSR_EE;
                allowed = allowed && !crit;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.shared->msr & MSR_DE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        }

        if (allowed) {
                switch (int_class) {
                case INT_CLASS_NONCRIT:
                        set_guest_srr(vcpu, vcpu->arch.pc,
                                      vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_CRIT:
                        set_guest_csrr(vcpu, vcpu->arch.pc,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_DBG:
                        set_guest_dsrr(vcpu, vcpu->arch.pc,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_MC:
                        set_guest_mcsrr(vcpu, vcpu->arch.pc,
                                        vcpu->arch.shared->msr);
                        break;
                }

                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr == true)
                        kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
                if (update_dear == true)
                        kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
                if (update_epr == true) {
                        if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
                                kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
                        else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
                                BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
                                kvmppc_mpic_set_epr(vcpu);
                        }
                }

                new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
                if (vcpu->arch.epcr & SPRN_EPCR_ICM)
                        new_msr |= MSR_CM;
#endif
                kvmppc_set_msr(vcpu, new_msr);

                if (!keep_irq)
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

#ifdef CONFIG_KVM_BOOKE_HV
        /*
         * If an interrupt is pending but masked, raise a guest doorbell
         * so that we are notified when the guest enables the relevant
         * MSR bit.
         */
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

        return allowed;
}

/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
        u64 tb, wdt_tb, wdt_ticks = 0;
        u64 nr_jiffies = 0;
        u32 period = TCR_GET_WP(vcpu->arch.tcr);

        wdt_tb = 1ULL << (63 - period);
        tb = get_tb();
        /*
         * The watchdog timeout will happen when the TB bit corresponding
         * to the watchdog toggles from 0 to 1.
         */
        if (tb & wdt_tb)
                wdt_ticks = wdt_tb;

        wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

        /* Convert timebase ticks to jiffies */
        nr_jiffies = wdt_ticks;

        if (do_div(nr_jiffies, tb_ticks_per_jiffy))
                nr_jiffies++;

        return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
        unsigned long nr_jiffies;
        unsigned long flags;

        /*
         * If TSR_ENW and TSR_WIS are not set then no need to exit to
         * userspace, so clear the KVM_REQ_WATCHDOG request.
         */
        if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
                clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

        spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
        nr_jiffies = watchdog_next_timeout(vcpu);
        /*
         * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
         * then do not run the watchdog timer as this can break timer APIs.
         */
        if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
                mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
        else
                del_timer(&vcpu->arch.wdt_timer);
        spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

void kvmppc_watchdog_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
        u32 tsr, new_tsr;
        int final;

        do {
                new_tsr = tsr = vcpu->arch.tsr;
                final = 0;

                /* Time out event */
                if (tsr & TSR_ENW) {
                        if (tsr & TSR_WIS)
                                final = 1;
                        else
                                new_tsr = tsr | TSR_WIS;
                } else {
                        new_tsr = tsr | TSR_ENW;
                }
        } while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

        if (new_tsr & TSR_WIS) {
                smp_wmb();
                kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        /*
         * If this is final watchdog expiry and some action is required
         * then exit to userspace.
         */
        if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
            vcpu->arch.watchdog_enabled) {
                smp_wmb();
                kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
                kvm_vcpu_kick(vcpu);
        }

        /*
         * Stop running the watchdog timer after final expiration to
         * prevent the host from being flooded with timers if the
         * guest sets a short period.
         * Timers will resume when TSR/TCR is updated next time.
         */
        if (!final)
                arm_next_watchdog(vcpu);
}

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
                kvmppc_core_queue_dec(vcpu);
        else
                kvmppc_core_dequeue_dec(vcpu);

        if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
                kvmppc_core_queue_watchdog(vcpu);
        else
                kvmppc_core_dequeue_watchdog(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

        priority = __ffs(*pending);
        while (priority < BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 0;
        WARN_ON_ONCE(!irqs_disabled());

        kvmppc_core_check_exceptions(vcpu);

        if (vcpu->requests) {
                /* Exception delivery raised request; start over */
                return 1;
        }

        if (vcpu->arch.shared->msr & MSR_WE) {
                local_irq_enable();
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                hard_irq_disable();

                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
                r = 1;
        };

        return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
        int r = 1; /* Indicate we want to get back into the guest */

        if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
                update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
        if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
                kvmppc_core_flush_tlb(vcpu);
#endif

        if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
                vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
                r = 0;
        }

        if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
                vcpu->run->epr.epr = 0;
                vcpu->arch.epr_needed = true;
                vcpu->run->exit_reason = KVM_EXIT_EPR;
                r = 0;
        }

        return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret, s;
        struct debug_reg debug;

        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        }

        s = kvmppc_prepare_to_enter(vcpu);
        if (s <= 0) {
                ret = s;
                goto out;
        }
        /* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
        enable_kernel_fp();

        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
         * as always using the FPU.  Kernel usage of FP (via
         * enable_kernel_fp()) in this thread must not occur while
         * vcpu->fpu_active is set.
         */
        vcpu->fpu_active = 1;

        kvmppc_load_guest_fp(vcpu);
#endif

        /* Switch to guest debug context */
        debug = vcpu->arch.shadow_dbg_reg;
        switch_booke_debug_regs(&debug);
        debug = current->thread.debug;
        current->thread.debug = vcpu->arch.shadow_dbg_reg;

        vcpu->arch.pgdir = current->mm->pgd;
        kvmppc_fix_ee_before_entry();

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

        /* No need for kvm_guest_exit. It's done in handle_exit.
           We also get here with interrupts enabled. */

        /* Switch back to user space debug context */
        switch_booke_debug_regs(&debug);
        current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
        kvmppc_save_guest_fp(vcpu);

        vcpu->fpu_active = 0;
#endif

out:
        vcpu->mode = OUTSIDE_GUEST_MODE;
        return ret;
}

static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* don't overwrite subtypes, just account kvm_stats */
                kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                /* Future optimization: only reload non-volatiles if
                 * they were actually modified by emulation. */
                return RESUME_GUEST_NV;

        case EMULATE_AGAIN:
                return RESUME_GUEST;

        case EMULATE_DO_DCR:
                run->exit_reason = KVM_EXIT_DCR;
                return RESUME_HOST;

        case EMULATE_FAIL:
                printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                /* For debugging, encode the failing instruction and
                 * report it to userspace. */
                run->hw.hardware_exit_reason = ~0ULL << 32;
                run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                kvmppc_core_queue_program(vcpu, ESR_PIL);
                return RESUME_HOST;

        case EMULATE_EXIT_USER:
                return RESUME_HOST;

        default:
                BUG();
        }
}

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        struct debug_reg *dbg_reg = &(vcpu->arch.shadow_dbg_reg);
        u32 dbsr = vcpu->arch.dbsr;

        run->debug.arch.status = 0;
        run->debug.arch.address = vcpu->arch.pc;

        if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
                run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
        } else {
                if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
                        run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
                else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
                        run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
                if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
                        run->debug.arch.address = dbg_reg->dac1;
                else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
                        run->debug.arch.address = dbg_reg->dac2;
        }

        return RESUME_HOST;
}

static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
        ulong r1, ip, msr, lr;

        asm("mr %0, 1" : "=r"(r1));
        asm("mflr %0" : "=r"(lr));
        asm("mfmsr %0" : "=r"(msr));
        asm("bl 1f; 1: mflr %0" : "=r"(ip));

        memset(regs, 0, sizeof(*regs));
        regs->gpr[1] = r1;
        regs->nip = ip;
        regs->msr = msr;
        regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handlers are called from here in a similar
 * (but not identical) way to how they are called from the low level
 * handler (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
        struct pt_regs regs;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_fill_pt_regs(&regs);
                do_IRQ(&regs);
                break;
        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_fill_pt_regs(&regs);
                timer_interrupt(&regs);
                break;
#if defined(CONFIG_PPC_DOORBELL)
        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_fill_pt_regs(&regs);
                doorbell_exception(&regs);
                break;
#endif
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                /* FIXME */
                break;
        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                kvmppc_fill_pt_regs(&regs);
                performance_monitor_exception(&regs);
                break;
        case BOOKE_INTERRUPT_WATCHDOG:
                kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
                WatchdogException(&regs);
#else
                unknown_exception(&regs);
#endif
                break;
        case BOOKE_INTERRUPT_CRITICAL:
                unknown_exception(&regs);
                break;
        case BOOKE_INTERRUPT_DEBUG:
                /* Save DBSR before preemption is enabled */
                vcpu->arch.dbsr = mfspr(SPRN_DBSR);
                kvmppc_clear_dbsr();
                break;
        }
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        int r = RESUME_HOST;
        int s;
        int idx;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

        /* restart interrupts if they were meant for the host */
        kvmppc_restart_interrupt(vcpu, exit_nr);

        local_irq_enable();

        trace_kvm_exit(exit_nr, vcpu);
        kvm_guest_exit();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                /* For debugging, send invalid exit reason to user space */
                run->hw.hardware_exit_reason = ~1ULL << 32;
                run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_account_exit(vcpu, DEC_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_WATCHDOG:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_account_exit(vcpu, DBELL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_CE or MSR_ME was not
                 * set.  Once we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_EE was not set.  Once
                 * we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_HV_PRIV:
                r = emulation_exit(run, vcpu);
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
                        /*
                         * Program traps generated by user-level software must
                         * be handled by the guest kernel.
                         *
                         * In GS mode, hypervisor privileged instructions trap
                         * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
                         * actual program interrupts, handled by the guest.
                         */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                r = emulation_exit(run, vcpu);
                break;

        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_SPE
        case BOOKE_INTERRUPT_SPE_UNAVAIL: {
                if (vcpu->arch.shared->msr & MSR_SPE)
                        kvmppc_vcpu_enable_spe(vcpu);
                else
                        kvmppc_booke_queue_irqprio(vcpu,
                                                   BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;
        }

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                /*
                 * Guest wants SPE, but host kernel doesn't support it.  Send
                 * an "unimplemented operation" program check to the guest.
                 */
                kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
                r = RESUME_GUEST;
                break;

        /*
         * These really should never happen without CONFIG_SPE,
         * as we should never enable the real MSR[SPE] in the guest.
         */
        case BOOKE_INTERRUPT_SPE_FP_DATA:
        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
                       __func__, exit_nr, vcpu->arch.pc);
                run->hw.hardware_exit_reason = exit_nr;
                r = RESUME_HOST;
                break;
#endif

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_ALIGNMENT:
                kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
                                            vcpu->arch.fault_esr);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_KVM_BOOKE_HV
        case BOOKE_INTERRUPT_HV_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR)) {
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                } else {
                        /*
                         * hcall from guest userspace -- send privileged
                         * instruction program check.
                         */
                        kvmppc_core_queue_program(vcpu, ESR_PPR);
                }

                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                }
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;
#endif

        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
                        kvmppc_map_magic(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;

                        break;
                }
#endif

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                idx = srcu_read_lock(&vcpu->kvm->srcu);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM.  This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        vcpu->arch.vaddr_accessed = eaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }

        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.pc;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                idx = srcu_read_lock(&vcpu->kvm->srcu);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                srcu_read_unlock(&vcpu->kvm->srcu, idx);
                break;
        }

        case BOOKE_INTERRUPT_DEBUG: {
                r = kvmppc_handle_debug(run, vcpu);
                if (r == RESUME_HOST)
                        run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }

        /*
         * To avoid clobbering exit_reason, only check for signals if we
         * aren't already exiting to userspace for some other reason.
         */
        if (!(r & RESUME_HOST)) {
                s = kvmppc_prepare_to_enter(vcpu);
                if (s <= 0)
                        r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                else {
                        /* interrupts now hard-disabled */
                        kvmppc_fix_ee_before_entry();
                }
        }

        return r;
}

static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
        u32 old_tsr = vcpu->arch.tsr;

        vcpu->arch.tsr = new_tsr;

        if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
                arm_next_watchdog(vcpu);

        update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int i;
        int r;

        vcpu->arch.pc = 0;
        vcpu->arch.shared->pir = vcpu->vcpu_id;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
        kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
        vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
        vcpu->arch.shadow_pid = 1;
        vcpu->arch.shared->msr = 0;
#endif

        /* Eye-catching numbers so we know if the guest takes an interrupt
         * before it's programmed its own IVPR/IVORs. */
        vcpu->arch.ivpr = 0x55550000;
        for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
                vcpu->arch.ivor[i] = 0x7700 | i * 4;

        kvmppc_init_timing_stats(vcpu);

        r = kvmppc_core_vcpu_setup(vcpu);
        kvmppc_sanity_check(vcpu);
        return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
        /* setup watchdog timer once */
        spin_lock_init(&vcpu->arch.wdt_lock);
        setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
                    (unsigned long)vcpu);

        return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
        del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = kvmppc_get_srr0(vcpu);
        regs->srr1 = kvmppc_get_srr1(vcpu);
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = kvmppc_get_sprg0(vcpu);
        regs->sprg1 = kvmppc_get_sprg1(vcpu);
        regs->sprg2 = kvmppc_get_sprg2(vcpu);
        regs->sprg3 = kvmppc_get_sprg3(vcpu);
        regs->sprg4 = kvmppc_get_sprg4(vcpu);
        regs->sprg5 = kvmppc_get_sprg5(vcpu);
        regs->sprg6 = kvmppc_get_sprg6(vcpu);
        regs->sprg7 = kvmppc_get_sprg7(vcpu);

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu->arch.pc = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        kvmppc_set_srr0(vcpu, regs->srr0);
        kvmppc_set_srr1(vcpu, regs->srr1);
        kvmppc_set_pid(vcpu, regs->pid);
        kvmppc_set_sprg0(vcpu, regs->sprg0);
        kvmppc_set_sprg1(vcpu, regs->sprg1);
        kvmppc_set_sprg2(vcpu, regs->sprg2);
        kvmppc_set_sprg3(vcpu, regs->sprg3);
        kvmppc_set_sprg4(vcpu, regs->sprg4);
        kvmppc_set_sprg5(vcpu, regs->sprg5);
        kvmppc_set_sprg6(vcpu, regs->sprg6);
        kvmppc_set_sprg7(vcpu, regs->sprg7);

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
        u64 tb = get_tb();

        sregs->u.e.features |= KVM_SREGS_E_BASE;

        sregs->u.e.csrr0 = vcpu->arch.csrr0;
        sregs->u.e.csrr1 = vcpu->arch.csrr1;
        sregs->u.e.mcsr = vcpu->arch.mcsr;
        sregs->u.e.esr = kvmppc_get_esr(vcpu);
        sregs->u.e.dear = kvmppc_get_dar(vcpu);
        sregs->u.e.tsr = vcpu->arch.tsr;
        sregs->u.e.tcr = vcpu->arch.tcr;
        sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
        sregs->u.e.tb = tb;
        sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
                return 0;

        vcpu->arch.csrr0 = sregs->u.e.csrr0;
        vcpu->arch.csrr1 = sregs->u.e.csrr1;
        vcpu->arch.mcsr = sregs->u.e.mcsr;
        kvmppc_set_esr(vcpu, sregs->u.e.esr);
        kvmppc_set_dar(vcpu, sregs->u.e.dear);
        vcpu->arch.vrsave = sregs->u.e.vrsave;
        kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
                vcpu->arch.dec = sregs->u.e.dec;
                kvmppc_emulate_dec(vcpu);
        }

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
                kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

        return 0;
}

1311static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1312 struct kvm_sregs *sregs)
1313{
1314 sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1315
Scott Wood841741f2011-09-02 17:39:37 -05001316 sregs->u.e.pir = vcpu->vcpu_id;
Scott Wood5ce941e2011-04-27 17:24:21 -05001317 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1318 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1319 sregs->u.e.decar = vcpu->arch.decar;
1320 sregs->u.e.ivpr = vcpu->arch.ivpr;
1321}
1322
1323static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1324 struct kvm_sregs *sregs)
1325{
1326 if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1327 return 0;
1328
Scott Wood841741f2011-09-02 17:39:37 -05001329 if (sregs->u.e.pir != vcpu->vcpu_id)
Scott Wood5ce941e2011-04-27 17:24:21 -05001330 return -EINVAL;
1331
1332 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1333 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1334 vcpu->arch.decar = sregs->u.e.decar;
1335 vcpu->arch.ivpr = sregs->u.e.ivpr;
1336
1337 return 0;
1338}
1339
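/*
 * IVOR get/set helpers: sregs->u.e.ivor_low[n] corresponds to hardware
 * IVORn, while vcpu->arch.ivor[] is indexed by the internal BOOKE_IRQPRIO_*
 * priorities, so the assignments below translate between the two numberings.
 */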
int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_IVOR;

        sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
        sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
        sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
        sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
        sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
        sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
        sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
        sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
        sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
        sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
        sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
        sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
        sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
        sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
        sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
        sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
        return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
        vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
        vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
        vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
        vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
        vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
        vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
        vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

        return 0;
}

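/*
 * Top-level KVM_GET_SREGS/KVM_SET_SREGS handlers: the generic base and
 * ARCH206 groups are handled here, then the active backend is given a
 * chance to handle its implementation-specific registers via
 * kvm->arch.kvm_ops.
 */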
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        sregs->pvr = vcpu->arch.pvr;

        get_sregs_base(vcpu, sregs);
        get_sregs_arch206(vcpu, sregs);
        return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        if (vcpu->arch.pvr != sregs->pvr)
                return -EINVAL;

        ret = set_sregs_base(vcpu, sregs);
        if (ret < 0)
                return ret;

        ret = set_sregs_arch206(vcpu, sregs);
        if (ret < 0)
                return ret;

        return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}

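/*
 * KVM_GET_ONE_REG/KVM_SET_ONE_REG: registers handled identically on all
 * BookE flavours live in the switch statements below; any other register ID
 * is forwarded to the backend's get_one_reg/set_one_reg callback.  The
 * value is copied to or from user space at reg->addr, sized by
 * one_reg_size().
 */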
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r = 0;
        union kvmppc_one_reg val;
        int size;

        size = one_reg_size(reg->id);
        if (size > sizeof(val))
                return -EINVAL;

        switch (reg->id) {
        case KVM_REG_PPC_IAC1:
                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
                break;
        case KVM_REG_PPC_IAC2:
                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
                break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        case KVM_REG_PPC_IAC3:
                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
                break;
        case KVM_REG_PPC_IAC4:
                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
                break;
#endif
        case KVM_REG_PPC_DAC1:
                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
                break;
        case KVM_REG_PPC_DAC2:
                val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
                break;
        case KVM_REG_PPC_EPR: {
                u32 epr = kvmppc_get_epr(vcpu);
                val = get_reg_val(reg->id, epr);
                break;
        }
#if defined(CONFIG_64BIT)
        case KVM_REG_PPC_EPCR:
                val = get_reg_val(reg->id, vcpu->arch.epcr);
                break;
#endif
        case KVM_REG_PPC_TCR:
                val = get_reg_val(reg->id, vcpu->arch.tcr);
                break;
        case KVM_REG_PPC_TSR:
                val = get_reg_val(reg->id, vcpu->arch.tsr);
                break;
        case KVM_REG_PPC_DEBUG_INST:
                val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
                break;
        case KVM_REG_PPC_VRSAVE:
                val = get_reg_val(reg->id, vcpu->arch.vrsave);
                break;
        default:
                r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, reg->id, &val);
                break;
        }

        if (r)
                return r;

        if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
                r = -EFAULT;

        return r;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r = 0;
        union kvmppc_one_reg val;
        int size;

        size = one_reg_size(reg->id);
        if (size > sizeof(val))
                return -EINVAL;

        if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
                return -EFAULT;

        switch (reg->id) {
        case KVM_REG_PPC_IAC1:
                vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
                break;
        case KVM_REG_PPC_IAC2:
                vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
                break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        case KVM_REG_PPC_IAC3:
                vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
                break;
        case KVM_REG_PPC_IAC4:
                vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
                break;
#endif
        case KVM_REG_PPC_DAC1:
                vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
                break;
        case KVM_REG_PPC_DAC2:
                vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
                break;
        case KVM_REG_PPC_EPR: {
                u32 new_epr = set_reg_val(reg->id, val);
                kvmppc_set_epr(vcpu, new_epr);
                break;
        }
#if defined(CONFIG_64BIT)
        case KVM_REG_PPC_EPCR: {
                u32 new_epcr = set_reg_val(reg->id, val);
                kvmppc_set_epcr(vcpu, new_epcr);
                break;
        }
#endif
        case KVM_REG_PPC_OR_TSR: {
                u32 tsr_bits = set_reg_val(reg->id, val);
                kvmppc_set_tsr_bits(vcpu, tsr_bits);
                break;
        }
        case KVM_REG_PPC_CLEAR_TSR: {
                u32 tsr_bits = set_reg_val(reg->id, val);
                kvmppc_clr_tsr_bits(vcpu, tsr_bits);
                break;
        }
        case KVM_REG_PPC_TSR: {
                u32 tsr = set_reg_val(reg->id, val);
                kvmppc_set_tsr(vcpu, tsr);
                break;
        }
        case KVM_REG_PPC_TCR: {
                u32 tcr = set_reg_val(reg->id, val);
                kvmppc_set_tcr(vcpu, tcr);
                break;
        }
        case KVM_REG_PPC_VRSAVE:
                vcpu->arch.vrsave = set_reg_val(reg->id, val);
                break;
        default:
                r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, reg->id, &val);
                break;
        }

        return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        r = kvmppc_core_vcpu_translate(vcpu, tr);
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
                              struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
                               unsigned long npages)
{
        return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot,
                                      struct kvm_userspace_memory_region *mem)
{
        return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem,
                                      const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

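/*
 * EPCR is only meaningful on 64-bit cores, hence the CONFIG_64BIT guard.
 * On HV (CONFIG_KVM_BOOKE_HV) the shadow EPCR actually loaded into hardware
 * mirrors the guest's ICM bit as GICM, so guest interrupts use the
 * computation mode the guest asked for.
 */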
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
        vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
        vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
        if (vcpu->arch.epcr & SPRN_EPCR_ICM)
                vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
        vcpu->arch.tcr = new_tcr;
        arm_next_watchdog(vcpu);
        update_timer_ints(vcpu);
}

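/*
 * TSR updates can make a timer interrupt deliverable, so after setting or
 * clearing bits we re-evaluate pending timer interrupts (and kick the vcpu
 * on the set path).  Clearing ENW/WIS may also require re-arming the
 * watchdog timer.
 */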
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        set_bits(tsr_bits, &vcpu->arch.tsr);
        smp_wmb();
        kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        clear_bits(tsr_bits, &vcpu->arch.tsr);

        /*
         * We may have stopped the watchdog due to
         * being stuck on final expiration.
         */
        if (tsr_bits & (TSR_ENW | TSR_WIS))
                arm_next_watchdog(vcpu);

        update_timer_ints(vcpu);
}

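/*
 * Decrementer timer callback: if the guest enabled auto-reload (TCR[ARE]),
 * reload DEC from DECAR and restart the emulated decrementer, then post
 * TSR[DIS] so a decrementer interrupt can be delivered to the guest.
 */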
void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        if (vcpu->arch.tcr & TCR_ARE) {
                vcpu->arch.dec = vcpu->arch.decar;
                kvmppc_emulate_dec(vcpu);
        }

        kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

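/*
 * The two helpers below translate KVM_SET_GUEST_DEBUG hardware breakpoints
 * and watchpoints into BookE debug registers: IACn for instruction
 * addresses, DACn for data addresses, with the matching enable bits (and
 * DBCR0_IDM) set in DBCR0.
 */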
static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
                                       uint64_t addr, int index)
{
        switch (index) {
        case 0:
                dbg_reg->dbcr0 |= DBCR0_IAC1;
                dbg_reg->iac1 = addr;
                break;
        case 1:
                dbg_reg->dbcr0 |= DBCR0_IAC2;
                dbg_reg->iac2 = addr;
                break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
        case 2:
                dbg_reg->dbcr0 |= DBCR0_IAC3;
                dbg_reg->iac3 = addr;
                break;
        case 3:
                dbg_reg->dbcr0 |= DBCR0_IAC4;
                dbg_reg->iac4 = addr;
                break;
#endif
        default:
                return -EINVAL;
        }

        dbg_reg->dbcr0 |= DBCR0_IDM;
        return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
                                       int type, int index)
{
        switch (index) {
        case 0:
                if (type & KVMPPC_DEBUG_WATCH_READ)
                        dbg_reg->dbcr0 |= DBCR0_DAC1R;
                if (type & KVMPPC_DEBUG_WATCH_WRITE)
                        dbg_reg->dbcr0 |= DBCR0_DAC1W;
                dbg_reg->dac1 = addr;
                break;
        case 1:
                if (type & KVMPPC_DEBUG_WATCH_READ)
                        dbg_reg->dbcr0 |= DBCR0_DAC2R;
                if (type & KVMPPC_DEBUG_WATCH_WRITE)
                        dbg_reg->dbcr0 |= DBCR0_DAC2W;
                dbg_reg->dac2 = addr;
                break;
        default:
                return -EINVAL;
        }

        dbg_reg->dbcr0 |= DBCR0_IDM;
        return 0;
}
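
/*
 * On BookE-HV, MSRP bits make the corresponding guest MSR bits privileged,
 * so guest attempts to change them (e.g. MSR[DE] while the host owns the
 * debug resources) trap to the host instead of taking effect directly.
 */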
void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
        /* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
        BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
        if (set) {
                if (prot_bitmap & MSR_UCLE)
                        vcpu->arch.shadow_msrp |= MSRP_UCLEP;
                if (prot_bitmap & MSR_DE)
                        vcpu->arch.shadow_msrp |= MSRP_DEP;
                if (prot_bitmap & MSR_PMM)
                        vcpu->arch.shadow_msrp |= MSRP_PMMP;
        } else {
                if (prot_bitmap & MSR_UCLE)
                        vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
                if (prot_bitmap & MSR_DE)
                        vcpu->arch.shadow_msrp &= ~MSRP_DEP;
                if (prot_bitmap & MSR_PMM)
                        vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
        }
#endif
}

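/*
 * KVM_SET_GUEST_DEBUG: single-stepping and host-owned hardware break- and
 * watchpoints are programmed into the shadow debug registers, while the
 * guest-visible DBCR0 only advertises DBCR0_EDM so the guest can see that
 * an external debugger owns the debug resources.
 */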
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        struct debug_reg *dbg_reg;
        int n, b = 0, w = 0;

        if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
                vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
                vcpu->guest_debug = 0;
                kvm_guest_protect_msr(vcpu, MSR_DE, false);
                return 0;
        }

        kvm_guest_protect_msr(vcpu, MSR_DE, true);
        vcpu->guest_debug = dbg->control;
        vcpu->arch.shadow_dbg_reg.dbcr0 = 0;
        /* Set DBCR0_EDM in guest visible DBCR0 register. */
        vcpu->arch.dbg_reg.dbcr0 = DBCR0_EDM;

        if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
                vcpu->arch.shadow_dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

        /* Code below handles only HW breakpoints */
        dbg_reg = &(vcpu->arch.shadow_dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
        /*
         * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
         * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
         */
        dbg_reg->dbcr1 = 0;
        dbg_reg->dbcr2 = 0;
#else
        /*
         * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
         * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
         * is set.
         */
        dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
                         DBCR1_IAC4US;
        dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

        if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
                return 0;

        for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
                uint64_t addr = dbg->arch.bp[n].addr;
                uint32_t type = dbg->arch.bp[n].type;

                if (type == KVMPPC_DEBUG_NONE)
                        continue;

                /* Reject any type outside the supported read/write/breakpoint flags. */
                if (type & ~(KVMPPC_DEBUG_WATCH_READ |
                             KVMPPC_DEBUG_WATCH_WRITE |
                             KVMPPC_DEBUG_BREAKPOINT))
                        return -EINVAL;

                if (type & KVMPPC_DEBUG_BREAKPOINT) {
                        /* Setting H/W breakpoint */
                        if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
                                return -EINVAL;
                } else {
                        /* Setting H/W watchpoint */
                        if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
                                                        type, w++))
                                return -EINVAL;
                }
        }

        return 0;
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        vcpu->cpu = smp_processor_id();
        current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
        current->thread.kvm_vcpu = NULL;
        vcpu->cpu = -1;

        /* Clear pending debug event in DBSR */
        kvmppc_clear_dbsr();
}

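/*
 * The remaining core hooks are thin wrappers that dispatch to the active
 * backend (e.g. the e500 or e500mc implementation) through
 * kvm->arch.kvm_ops.
 */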
void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
        return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
        return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
        kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

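/*
 * Module init: on non-HV configurations the KVM exception handlers are
 * copied into a dedicated 64KB block at the same offsets as the host's
 * IVORs, so entering the guest only requires switching IVPR rather than
 * rewriting every IVOR.  kvmppc_booke_handler_addr[] is expected to carry
 * one extra entry past the last handler so that adjacent entries give each
 * handler's length.
 */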
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
        unsigned long ivor[16];
        unsigned long *handler = kvmppc_booke_handler_addr;
        unsigned long max_ivor = 0;
        unsigned long handler_len;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR only
         * holds the upper 16 bits of the vector base, so the handlers need a
         * naturally aligned 64KB allocation. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = i;

                handler_len = handler[i + 1] - handler[i];
                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       (void *)handler[i], handler_len);
        }

        handler_len = handler[max_ivor + 1] - handler[max_ivor];
        flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
                           ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
        return 0;
}

void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
        kvm_exit();
}