/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"
#include "trace.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "dcr", VCPU_STAT(dcr_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}

static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#else
	return vcpu->arch.epr;
#endif
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_enabled)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true)
			kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout. If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog period toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}
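
/*
 * Timer callback for the guest watchdog. Advances the TSR_ENW/TSR_WIS
 * state machine atomically and kicks the vcpu; on the final expiry, with
 * a watchdog action configured, it requests an exit to userspace.
 */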
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}
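
/*
 * Reflect the current TCR/TSR state into the pending decrementer and
 * watchdog interrupt queues.
 */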
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}
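
/*
 * Walk the pending exception bitmap in priority order and deliver the
 * first exception that the guest can currently take.
 */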
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	};

	return r;
}
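
/*
 * Handle vcpu->requests raised while outside the guest; returns 1 if we
 * can go back into the guest, 0 if we must exit to userspace first.
 */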
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}
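
/*
 * Top-level vcpu run entry: deliver pending exceptions, swap the host
 * FPU state for the guest's (when CONFIG_PPC_FPU), run the guest, then
 * restore the host state.
 */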
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		local_irq_enable();
		ret = s;
		goto out;
	}
	kvmppc_lazy_ee_enable();

	kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU. Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}
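
/* Handle an exit that requires instruction emulation. */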
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}
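
/*
 * Build a minimal pt_regs describing the current host context so that
 * host interrupt handlers can be invoked directly from KVM.
 */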
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar way
 * (though not exactly) to how it is called from the low level handlers
 * (such as arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set. Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set. Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it. Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		} else {
			kvmppc_lazy_ee_enable();
		}
	}

	return r;
}
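
/*
 * Replace TSR wholesale; re-arm the watchdog if its status bits changed
 * and re-evaluate pending timer interrupts.
 */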
static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
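
/* Accessors for the KVM_SREGS_E_BASE part of the BookE sregs interface. */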
Scott Wood5ce941e2011-04-27 17:24:21 -05001266static void get_sregs_base(struct kvm_vcpu *vcpu,
1267 struct kvm_sregs *sregs)
1268{
1269 u64 tb = get_tb();
1270
1271 sregs->u.e.features |= KVM_SREGS_E_BASE;
1272
1273 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1274 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1275 sregs->u.e.mcsr = vcpu->arch.mcsr;
Scott Woodd30f6e42011-12-20 15:34:43 +00001276 sregs->u.e.esr = get_guest_esr(vcpu);
1277 sregs->u.e.dear = get_guest_dear(vcpu);
Scott Wood5ce941e2011-04-27 17:24:21 -05001278 sregs->u.e.tsr = vcpu->arch.tsr;
1279 sregs->u.e.tcr = vcpu->arch.tcr;
1280 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1281 sregs->u.e.tb = tb;
1282 sregs->u.e.vrsave = vcpu->arch.vrsave;
1283}
1284
1285static int set_sregs_base(struct kvm_vcpu *vcpu,
1286 struct kvm_sregs *sregs)
1287{
1288 if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
1289 return 0;
1290
1291 vcpu->arch.csrr0 = sregs->u.e.csrr0;
1292 vcpu->arch.csrr1 = sregs->u.e.csrr1;
1293 vcpu->arch.mcsr = sregs->u.e.mcsr;
Scott Woodd30f6e42011-12-20 15:34:43 +00001294 set_guest_esr(vcpu, sregs->u.e.esr);
1295 set_guest_dear(vcpu, sregs->u.e.dear);
Scott Wood5ce941e2011-04-27 17:24:21 -05001296 vcpu->arch.vrsave = sregs->u.e.vrsave;
Scott Wooddfd4d472011-11-17 12:39:59 +00001297 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
Scott Wood5ce941e2011-04-27 17:24:21 -05001298
Scott Wooddfd4d472011-11-17 12:39:59 +00001299 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
Scott Wood5ce941e2011-04-27 17:24:21 -05001300 vcpu->arch.dec = sregs->u.e.dec;
Scott Wooddfd4d472011-11-17 12:39:59 +00001301 kvmppc_emulate_dec(vcpu);
1302 }
Scott Wood5ce941e2011-04-27 17:24:21 -05001303
Bharat Bhushand26f22c2013-02-24 18:57:11 +00001304 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
1305 kvmppc_set_tsr(vcpu, sregs->u.e.tsr);
Scott Wood5ce941e2011-04-27 17:24:21 -05001306
1307 return 0;
1308}
1309
1310static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1311 struct kvm_sregs *sregs)
1312{
1313 sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1314
Scott Wood841741f2011-09-02 17:39:37 -05001315 sregs->u.e.pir = vcpu->vcpu_id;
Scott Wood5ce941e2011-04-27 17:24:21 -05001316 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1317 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1318 sregs->u.e.decar = vcpu->arch.decar;
1319 sregs->u.e.ivpr = vcpu->arch.ivpr;
1320}
1321
1322static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1323 struct kvm_sregs *sregs)
1324{
1325 if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1326 return 0;
1327
Scott Wood841741f2011-09-02 17:39:37 -05001328 if (sregs->u.e.pir != vcpu->vcpu_id)
Scott Wood5ce941e2011-04-27 17:24:21 -05001329 return -EINVAL;
1330
1331 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1332 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1333 vcpu->arch.decar = sregs->u.e.decar;
1334 vcpu->arch.ivpr = sregs->u.e.ivpr;
1335
1336 return 0;
1337}
1338
void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_IVOR;

        sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
        sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
        sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
        sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
        sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
        sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
        sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
        sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
        sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
        sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
        sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
        sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
        sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
        sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
        sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
        sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
        vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
        vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
        vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
        vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
        vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
        vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
        vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

        return 0;
}

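/*
 * Top-level KVM_GET_SREGS/KVM_SET_SREGS handlers: the base and arch 2.06
 * blocks are handled here, everything else is delegated to the
 * chip-specific core backend.  The PVR is read-only from userspace's
 * point of view, so a mismatch on the set path is rejected.
 */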
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        sregs->pvr = vcpu->arch.pvr;

        get_sregs_base(vcpu, sregs);
        get_sregs_arch206(vcpu, sregs);
        kvmppc_core_get_sregs(vcpu, sregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        if (vcpu->arch.pvr != sregs->pvr)
                return -EINVAL;

        ret = set_sregs_base(vcpu, sregs);
        if (ret < 0)
                return ret;

        ret = set_sregs_arch206(vcpu, sregs);
        if (ret < 0)
                return ret;

        return kvmppc_core_set_sregs(vcpu, sregs);
}

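/*
 * KVM_GET_ONE_REG: copy a single register out to the userspace address in
 * reg->addr.  Unknown register IDs fall through and return -EINVAL.
 */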
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_PPC_IAC1:
        case KVM_REG_PPC_IAC2:
        case KVM_REG_PPC_IAC3:
        case KVM_REG_PPC_IAC4: {
                int iac = reg->id - KVM_REG_PPC_IAC1;
                r = copy_to_user((u64 __user *)(long)reg->addr,
                                 &vcpu->arch.dbg_reg.iac[iac], sizeof(u64));
                break;
        }
        case KVM_REG_PPC_DAC1:
        case KVM_REG_PPC_DAC2: {
                int dac = reg->id - KVM_REG_PPC_DAC1;
                r = copy_to_user((u64 __user *)(long)reg->addr,
                                 &vcpu->arch.dbg_reg.dac[dac], sizeof(u64));
                break;
        }
        case KVM_REG_PPC_EPR: {
                u32 epr = get_guest_epr(vcpu);
                r = put_user(epr, (u32 __user *)(long)reg->addr);
                break;
        }
#if defined(CONFIG_64BIT)
        case KVM_REG_PPC_EPCR:
                r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
                break;
#endif
        case KVM_REG_PPC_TCR:
                r = put_user(vcpu->arch.tcr, (u32 __user *)(long)reg->addr);
                break;
        case KVM_REG_PPC_TSR:
                r = put_user(vcpu->arch.tsr, (u32 __user *)(long)reg->addr);
                break;
        case KVM_REG_PPC_DEBUG_INST: {
                u32 opcode = KVMPPC_INST_EHPRIV;
                r = copy_to_user((u32 __user *)(long)reg->addr,
                                 &opcode, sizeof(u32));
                break;
        }
        default:
                break;
        }
        return r;
}

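/*
 * KVM_SET_ONE_REG counterpart.  Timer registers go through the kvmppc_set_*
 * helpers rather than being written directly, so that watchdog and
 * decrementer interrupts are re-evaluated with the new values.
 */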
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        int r = -EINVAL;

        switch (reg->id) {
        case KVM_REG_PPC_IAC1:
        case KVM_REG_PPC_IAC2:
        case KVM_REG_PPC_IAC3:
        case KVM_REG_PPC_IAC4: {
                int iac = reg->id - KVM_REG_PPC_IAC1;
                r = copy_from_user(&vcpu->arch.dbg_reg.iac[iac],
                                   (u64 __user *)(long)reg->addr, sizeof(u64));
                break;
        }
        case KVM_REG_PPC_DAC1:
        case KVM_REG_PPC_DAC2: {
                int dac = reg->id - KVM_REG_PPC_DAC1;
                r = copy_from_user(&vcpu->arch.dbg_reg.dac[dac],
                                   (u64 __user *)(long)reg->addr, sizeof(u64));
                break;
        }
        case KVM_REG_PPC_EPR: {
                u32 new_epr;
                r = get_user(new_epr, (u32 __user *)(long)reg->addr);
                if (!r)
                        kvmppc_set_epr(vcpu, new_epr);
                break;
        }
#if defined(CONFIG_64BIT)
        case KVM_REG_PPC_EPCR: {
                u32 new_epcr;
                r = get_user(new_epcr, (u32 __user *)(long)reg->addr);
                if (r == 0)
                        kvmppc_set_epcr(vcpu, new_epcr);
                break;
        }
#endif
        case KVM_REG_PPC_OR_TSR: {
                u32 tsr_bits;
                r = get_user(tsr_bits, (u32 __user *)(long)reg->addr);
                if (!r)
                        kvmppc_set_tsr_bits(vcpu, tsr_bits);
                break;
        }
        case KVM_REG_PPC_CLEAR_TSR: {
                u32 tsr_bits;
                r = get_user(tsr_bits, (u32 __user *)(long)reg->addr);
                if (!r)
                        kvmppc_clr_tsr_bits(vcpu, tsr_bits);
                break;
        }
        case KVM_REG_PPC_TSR: {
                u32 tsr;
                r = get_user(tsr, (u32 __user *)(long)reg->addr);
                if (!r)
                        kvmppc_set_tsr(vcpu, tsr);
                break;
        }
        case KVM_REG_PPC_TCR: {
                u32 tcr;
                r = get_user(tcr, (u32 __user *)(long)reg->addr);
                if (!r)
                        kvmppc_set_tcr(vcpu, tcr);
                break;
        }
        default:
                break;
        }
        return r;
}

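/* Guest debug (KVM_SET_GUEST_DEBUG) is not supported here yet. */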
int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
                                        struct kvm_guest_debug *dbg)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        r = kvmppc_core_vcpu_translate(vcpu, tr);
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

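/*
 * Memory slot management needs no per-slot bookkeeping on Book E, so the
 * hooks below are empty (or trivially successful) stubs.
 */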
void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
                              struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
                               unsigned long npages)
{
        return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot,
                                      struct kvm_userspace_memory_region *mem)
{
        return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem,
                                      const struct kvm_memory_slot *old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

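/*
 * Update the guest EPCR.  On 64-bit HV hosts the shadow EPCR's GICM bit is
 * kept in sync with the guest's ICM setting so that guest interrupts are
 * delivered in the computation mode the guest asked for.
 */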
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
        vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
        vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
        if (vcpu->arch.epcr & SPRN_EPCR_ICM)
                vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

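/*
 * Writing TCR can enable or reconfigure the watchdog and timer interrupts,
 * so re-arm the watchdog and recheck pending timer interrupts afterwards.
 */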
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
        vcpu->arch.tcr = new_tcr;
        arm_next_watchdog(vcpu);
        update_timer_ints(vcpu);
}

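/*
 * Set TSR bits on behalf of the guest or the emulated timers and kick the
 * vcpu so that any newly pending timer interrupt is noticed promptly.
 */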
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        set_bits(tsr_bits, &vcpu->arch.tsr);
        smp_wmb();
        kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        clear_bits(tsr_bits, &vcpu->arch.tsr);

        /*
         * We may have stopped the watchdog due to
         * being stuck on final expiration.
         */
        if (tsr_bits & (TSR_ENW | TSR_WIS))
                arm_next_watchdog(vcpu);

        update_timer_ints(vcpu);
}

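/*
 * Decrementer timer callback.  With auto-reload (TCR[ARE]) enabled the
 * decrementer is reloaded from DECAR and restarted; in either case TSR[DIS]
 * is raised so the guest sees the decrementer interrupt become pending.
 */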
void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        if (vcpu->arch.tcr & TCR_ARE) {
                vcpu->arch.dec = vcpu->arch.decar;
                kvmppc_emulate_dec(vcpu);
        }

        kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

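/*
 * Common Book E load/put: record which physical CPU the vcpu runs on and
 * stash the vcpu pointer in the current thread so the exception handlers
 * can find it.
 */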
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        vcpu->cpu = smp_processor_id();
        current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
        current->thread.kvm_vcpu = NULL;
        vcpu->cpu = -1;
}

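/*
 * Module init for non-HV Book E: allocate a 64KB-aligned block for our own
 * exception handlers and copy each handler to the offset the host's IVORs
 * already use, so that IVPR is the only register that has to change on
 * guest/host transitions.
 */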
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
        unsigned long ivor[16];
        unsigned long *handler = kvmppc_booke_handler_addr;
        unsigned long max_ivor = 0;
        unsigned long handler_len;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR only
         * holds the upper 16 bits of the handler base address, so the
         * handlers must live in a 64KB-aligned 64KB block. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                /* Track which handler sits at the highest IVOR offset. */
                if (ivor[i] > ivor[max_ivor])
                        max_ivor = i;

                handler_len = handler[i + 1] - handler[i];
                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       (void *)handler[i], handler_len);
        }

        /* Flush the icache up to the end of the highest-placed handler. */
        handler_len = handler[max_ivor + 1] - handler[max_ivor];
        flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
                           ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
        return 0;
}

void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
        kvm_exit();
}