/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"
#include "trace.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "dcr",        VCPU_STAT(dcr_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
					    vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

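/*
 * SPE state is switched lazily: MSR_SPE is only set in the shadow MSR
 * (and thus enabled in hardware) while the guest's MSR has it set, and
 * the guest's SPE register state is saved/loaded around each transition.
 */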
#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
}

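/*
 * Mark an exception of the given priority as pending; actual delivery
 * (when architecturally permitted) happens in
 * kvmppc_booke_irqprio_deliver() on the next guest entry.
 */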
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

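/*
 * Accessors for the guest's exception save/restore and fault-reporting
 * registers.  Under CONFIG_KVM_BOOKE_HV several of these are real guest
 * SPRs (GSRR0/1, GDEAR, GESR, GEPR); otherwise they are backed by fields
 * in vcpu->arch or the page shared with the guest.
 */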
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}

static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#else
	return vcpu->arch.epr;
#endif
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		if (update_epr) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then there is no need to exit
	 * to userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies until the watchdog timeout is
	 * NEXT_TIMER_MAX_DELTA or more, do not run the watchdog timer
	 * at all, as that can break the timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

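/*
 * Timer callback for the virtual watchdog.  Implements the Book E
 * two-stage watchdog state machine: the first expiry sets TSR[ENW], the
 * second sets TSR[WIS] (raising the watchdog interrupt), and the final
 * expiry exits to userspace if a reset action (TCR[WRC]) is armed and
 * watchdog exits are enabled.
 */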
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is the final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after the final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume the next time TSR/TCR is updated.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

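/*
 * Raise or lower the pending decrementer and watchdog interrupts so they
 * match the current TCR enable bits and TSR status bits.
 */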
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

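/*
 * Walk the pending exceptions in bit order (lowest set bit first) and
 * deliver the first one the guest can currently take, then publish via
 * the shared area whether anything is still pending.
 */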
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

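/*
 * Main entry point for running a booke vcpu: delivers pending interrupts,
 * swaps host and guest FPU state around the low-level __kvmppc_vcpu_run()
 * call, and returns the exit disposition.
 */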
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		local_irq_enable();
		ret = s;
		goto out;
	}

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.  Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit.  It's done in handle_exit.
	   We also get here with interrupts enabled. */

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

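/*
 * Handle an exit that requires instruction emulation, mapping the
 * emulation result onto a resume action (and reporting failures to
 * userspace for debugging).
 */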
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

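/*
 * Capture just enough of the current host context (stack pointer, NIP,
 * MSR, LR) in a pt_regs for the host exception handlers called below.
 */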
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handlers are called from here in a similar
 * (but not identical) way to how the low level handlers call them
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;

#ifdef CONFIG_PPC64
	WARN_ON(local_paca->irq_happened != 0);
#endif

	/*
	 * We enter with interrupts disabled in hardware, but
	 * we need to call hard_irq_disable anyway to ensure that
	 * the software state is kept in sync.
	 */
	hard_irq_disable();

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
		                            vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't.  This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		} else {
			kvmppc_fix_ee_before_entry();
		}
	}

	return r;
}

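/*
 * Write a new TSR value, rearming the watchdog if the ENW/WIS bits
 * changed and resyncing the pending timer interrupts.
 */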
static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
	            (unsigned long)vcpu);

	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

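/*
 * Fill in the KVM_SREGS_E_BASE feature block: save/restore registers,
 * fault state, and the timer registers (with DEC and TB snapshotted at
 * the time of the call).
 */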
static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = get_guest_esr(vcpu);
	sregs->u.e.dear = get_guest_dear(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	set_guest_esr(vcpu, sregs->u.e.esr);
	set_guest_dear(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

1333static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1334 struct kvm_sregs *sregs)
1335{
1336 sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1337
Scott Wood841741f2011-09-02 17:39:37 -05001338 sregs->u.e.pir = vcpu->vcpu_id;
Scott Wood5ce941e2011-04-27 17:24:21 -05001339 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1340 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1341 sregs->u.e.decar = vcpu->arch.decar;
1342 sregs->u.e.ivpr = vcpu->arch.ivpr;
1343}
1344
static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

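/*
 * The IVOR group transfers the guest's interrupt vector offsets.
 * sregs->u.e.ivor_low[] is indexed by the architected IVOR number,
 * while the internal array is indexed by BOOKE_IRQPRIO_*, an internal
 * priority ordering, hence the explicit mapping in both directions.
 */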
void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

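/*
 * Top-level KVM_GET_SREGS/KVM_SET_SREGS handlers. The get path fills in
 * every group it knows about; the set path refuses a PVR that differs
 * from the vcpu's, then applies each feature group in turn, finishing
 * with the core-specific state.
 */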
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}

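/*
 * ONE_REG: read a single guest register, identified by reg->id, into a
 * userspace buffer at reg->addr. An illustrative userspace sketch (not
 * part of this file; it assumes only the generic one-reg ABI from
 * <linux/kvm.h> and an open vcpu fd) for reading the guest TCR:
 *
 *	__u32 tcr;
 *	struct kvm_one_reg reg = {
 *		.id   = KVM_REG_PPC_TCR,
 *		.addr = (__u64)(unsigned long)&tcr,
 *	};
 *	if (ioctl(vcpu_fd, KVM_GET_ONE_REG, &reg) < 0)
 *		perror("KVM_GET_ONE_REG");
 */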
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		val = get_reg_val(reg->id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		u32 epr = get_guest_epr(vcpu);
		val = get_reg_val(reg->id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		val = get_reg_val(reg->id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		val = get_reg_val(reg->id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		val = get_reg_val(reg->id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		val = get_reg_val(reg->id, KVMPPC_INST_EHPRIV_DEBUG);
		break;
	case KVM_REG_PPC_VRSAVE:
		val = get_reg_val(reg->id, vcpu->arch.vrsave);
		break;
	default:
		r = kvmppc_get_one_reg(vcpu, reg->id, &val);
		break;
	}

	if (r)
		return r;

	if (copy_to_user((char __user *)(unsigned long)reg->addr, &val, size))
		r = -EFAULT;

	return r;
}

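/*
 * The set side mirrors the get side, plus some write-only pseudo
 * registers: KVM_REG_PPC_OR_TSR and KVM_REG_PPC_CLEAR_TSR set or clear
 * individual TSR bits, sparing userspace a read-modify-write of
 * KVM_REG_PPC_TSR that could race against the timer emulation.
 */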
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = 0;
	union kvmppc_one_reg val;
	int size;

	size = one_reg_size(reg->id);
	if (size > sizeof(val))
		return -EINVAL;

	if (copy_from_user(&val, (char __user *)(unsigned long)reg->addr, size))
		return -EFAULT;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(reg->id, val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(reg->id, val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(reg->id, val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(reg->id, val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(reg->id, val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(reg->id, val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(reg->id, val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(reg->id, val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(reg->id, val);
		break;
	default:
		r = kvmppc_set_one_reg(vcpu, reg->id, &val);
		break;
	}

	return r;
}

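/* Guest debug via KVM_SET_GUEST_DEBUG is not supported at this layer. */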
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
	return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

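/*
 * Memslot hooks. These are no-ops here, presumably because booke keeps
 * no per-memslot bookkeeping of its own; guest mappings live in the
 * shadow TLB, which is handled elsewhere.
 */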
void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
                              struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
                               unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot,
                                      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem,
                                      const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

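/*
 * Update the guest EPCR and, on HV, fold the guest's ICM bit (64-bit
 * interrupt computation mode) into the shadow EPCR as GICM.
 */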
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

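/*
 * A TCR write can start, stop, or retune the watchdog and change which
 * timer interrupts are enabled, so re-arm the watchdog and re-evaluate
 * pending timer interrupts after updating it.
 */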
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

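/*
 * Setting TSR bits can make a timer interrupt deliverable. The write
 * barrier orders the TSR update before the request flag that the vcpu
 * checks, and the kick forces a running vcpu to notice the change.
 */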
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * The watchdog timer may have been stopped while it sat at final
	 * expiration; now that ENW/WIS have been cleared, re-arm it.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

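/*
 * Decrementer timer callback. With auto-reload enabled (TCR[ARE]) the
 * DEC is reloaded from DECAR and the emulated timer restarted; either
 * way the decrementer interrupt status bit (TSR[DIS]) is raised.
 */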
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

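/*
 * Per-CPU load/put: record which physical CPU the vcpu runs on and
 * stash the vcpu pointer where the exception handlers expect to find
 * it (current->thread.kvm_vcpu).
 */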
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;
}

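/*
 * Module init/exit. On non-HV booke the exception handlers are copied
 * into a single 64KB block laid out to match the host IVOR offsets, so
 * entering a guest only has to retarget IVPR rather than rewrite every
 * IVOR; see the comments in the function body.
 */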
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR
	 * supplies only the upper 16 address bits (the low 16 bits are
	 * ignored), so the handlers must sit in a 64KB-aligned block; the
	 * 64KB page allocation below is naturally aligned. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		/* Track the handler placed at the highest IVOR offset so
		 * the icache flush below reaches the end of the last
		 * handler. (The original compared the offset ivor[i]
		 * against the index max_ivor.) */
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !CONFIG_KVM_BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}