/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"
#include "trace.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio", VCPU_STAT(mmio_exits) },
	{ "dcr", VCPU_STAT(dcr_exits) },
	{ "sig", VCPU_STAT(signal_exits) },
	{ "itlb_r", VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v", VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r", VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v", VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc", VCPU_STAT(syscall_exits) },
	{ "isi", VCPU_STAT(isi_exits) },
	{ "dsi", VCPU_STAT(dsi_exits) },
	{ "inst_emu", VCPU_STAT(emulated_inst_exits) },
	{ "dec", VCPU_STAT(dec_exits) },
	{ "ext_intr", VCPU_STAT(ext_intr_exits) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc: %08lx msr: %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr: %08lx ctr: %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

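/*
 * Keep the shadow MSR[SPE] bit (what the hardware actually sees) in sync
 * with the guest's MSR[SPE], loading or saving the guest SPE state as the
 * bit is toggled.
 */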
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
}

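/*
 * Mark an interrupt of the given priority as pending; it is delivered to
 * the guest later by kvmppc_booke_irqprio_deliver().
 */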
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
				       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
					ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
					   ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
					   ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
					ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
				struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
				  struct kvm_interrupt *irq)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

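/*
 * Accessors for guest-visible exception save/restore state.  With
 * CONFIG_KVM_BOOKE_HV the guest's SRR0/1, DEAR and ESR live in dedicated
 * guest SPRs (GSRR0/1, GDEAR, GESR); otherwise they are kept in
 * vcpu->arch.shared, the area shared with the guest.
 */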
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GSRR0, srr0);
	mtspr(SPRN_GSRR1, srr1);
#else
	vcpu->arch.shared->srr0 = srr0;
	vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GDEAR);
#else
	return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GDEAR, dear);
#else
	vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GESR);
#else
	return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
	mtspr(SPRN_GESR, esr);
#else
	vcpu->arch.shared->esr = esr;
#endif
}

static unsigned long get_guest_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
	return mfspr(SPRN_GEPR);
#else
	return vcpu->arch.epr;
#endif
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
					unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_enabled)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		if (update_epr)
			kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout.  If the timeout is
 * longer than the NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

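/*
 * (Re)arm the host timer that emulates the guest watchdog, or stop it
 * when no timeout can be programmed without overflowing the timer API.
 */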
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then no need to exit to
	 * userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies of watchdog timer >= NEXT_TIMER_MAX_DELTA
	 * then do not run the watchdog timer as this can break timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

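/*
 * Host timer callback emulating the guest watchdog: successive expiries
 * step TSR through ENW and then WIS (raising a watchdog interrupt if one
 * is enabled), and the final expiry exits to userspace when TCR[WRC]
 * requests an action.  TSR is updated with cmpxchg() because this timer
 * runs asynchronously to the vcpu.
 */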
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is final watchdog expiry and some action is required
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

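/*
 * Resynchronize the pending decrementer and watchdog interrupts with the
 * current TCR enable bits and TSR status bits.
 */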
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

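/*
 * Try to deliver the highest-priority pending exception that is currently
 * deliverable, then publish the remaining pending status to the guest via
 * the shared area.
 */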
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
					 BITS_PER_BYTE * sizeof(*pending),
					 priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		local_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	}

	return r;
}

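/*
 * Handle outstanding vcpu->requests before entering the guest.  Returns
 * 1 to continue into the guest, or 0 to exit to userspace with
 * run->exit_reason set.
 */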
int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

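/*
 * Top-level vcpu run entry for booke: deliver pending exceptions, swap
 * the userspace FPU state for the guest's around __kvmppc_vcpu_run(),
 * and return the resulting RESUME_* code.
 */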
int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
#ifdef CONFIG_PPC_FPU
	unsigned int fpscr;
	int fpexc_mode;
	u64 fpr[32];
#endif

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	local_irq_disable();
	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		local_irq_enable();
		ret = s;
		goto out;
	}
	kvmppc_lazy_ee_enable();

	kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();
	memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
	fpscr = current->thread.fpscr.val;
	fpexc_mode = current->thread.fpexc_mode;

	/* Restore guest FPU state to thread */
	memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
	current->thread.fpscr.val = vcpu->arch.fpscr;

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.  Kernel usage of FP (via
	 * enable_kernel_fp()) in this thread must not occur while
	 * vcpu->fpu_active is set.
	 */
	vcpu->fpu_active = 1;

	kvmppc_load_guest_fp(vcpu);
#endif

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);

	vcpu->fpu_active = 0;

	/* Save guest FPU state from thread */
	memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
	vcpu->arch.fpscr = current->thread.fpscr.val;

	/* Restore userspace FPU state from stack */
	memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
	current->thread.fpscr.val = fpscr;
	current->thread.fpexc_mode = fpexc_mode;
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

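/*
 * Emulate the last guest instruction and map the emulation result to a
 * RESUME_* code.  On EMULATE_FAIL the failing instruction is reported to
 * userspace and a program check is queued for the guest.
 */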
static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_DO_DCR:
		run->exit_reason = KVM_EXIT_DCR;
		return RESUME_HOST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

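/*
 * Build a minimal pt_regs from the current host context so that the host
 * exception handlers (do_IRQ(), timer_interrupt(), ...) can be invoked
 * from the exit path.
 */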
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * For interrupts that need to be handled by host interrupt handlers,
 * the corresponding host handler is called from here in a similar
 * (but not exact) way as it would be called from the low-level handler
 * (such as from arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		unknown_exception(&regs);
		break;
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
		       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	local_irq_enable();

	trace_kvm_exit(exit_nr, vcpu);
	kvm_guest_exit();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set.  Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set.  Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
					       vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_ALIGNMENT:
		kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
					    vcpu->arch.fault_esr);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_KVM_BOOKE_HV
	case BOOKE_INTERRUPT_HV_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR)) {
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
		} else {
			/*
			 * hcall from guest userspace -- send privileged
			 * instruction program check.
			 */
			kvmppc_core_queue_program(vcpu, ESR_PPR);
		}

		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;
#endif

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
						    vcpu->arch.fault_dear,
						    vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			vcpu->arch.vaddr_accessed = eaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		local_irq_disable();
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0) {
			local_irq_enable();
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		} else {
			kvmppc_lazy_ee_enable();
		}
	}

	return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = vcpu->arch.shared->srr0;
	regs->srr1 = vcpu->arch.shared->srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.shared->sprg0;
	regs->sprg1 = vcpu->arch.shared->sprg1;
	regs->sprg2 = vcpu->arch.shared->sprg2;
	regs->sprg3 = vcpu->arch.shared->sprg3;
	regs->sprg4 = vcpu->arch.shared->sprg4;
	regs->sprg5 = vcpu->arch.shared->sprg5;
	regs->sprg6 = vcpu->arch.shared->sprg6;
	regs->sprg7 = vcpu->arch.shared->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	vcpu->arch.shared->srr0 = regs->srr0;
	vcpu->arch.shared->srr1 = regs->srr1;
	kvmppc_set_pid(vcpu, regs->pid);
	vcpu->arch.shared->sprg0 = regs->sprg0;
	vcpu->arch.shared->sprg1 = regs->sprg1;
	vcpu->arch.shared->sprg2 = regs->sprg2;
	vcpu->arch.shared->sprg3 = regs->sprg3;
	vcpu->arch.shared->sprg4 = regs->sprg4;
	vcpu->arch.shared->sprg5 = regs->sprg5;
	vcpu->arch.shared->sprg6 = regs->sprg6;
	vcpu->arch.shared->sprg7 = regs->sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}

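/*
 * Accessors for the KVM_SREGS_E_BASE feature block of KVM_GET_SREGS/
 * KVM_SET_SREGS: csrr0/1, mcsr, esr, dear, the timer registers and
 * vrsave.
 */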
Scott Wood5ce941e2011-04-27 17:24:21 -05001252static void get_sregs_base(struct kvm_vcpu *vcpu,
1253 struct kvm_sregs *sregs)
1254{
1255 u64 tb = get_tb();
1256
1257 sregs->u.e.features |= KVM_SREGS_E_BASE;
1258
1259 sregs->u.e.csrr0 = vcpu->arch.csrr0;
1260 sregs->u.e.csrr1 = vcpu->arch.csrr1;
1261 sregs->u.e.mcsr = vcpu->arch.mcsr;
Scott Woodd30f6e42011-12-20 15:34:43 +00001262 sregs->u.e.esr = get_guest_esr(vcpu);
1263 sregs->u.e.dear = get_guest_dear(vcpu);
Scott Wood5ce941e2011-04-27 17:24:21 -05001264 sregs->u.e.tsr = vcpu->arch.tsr;
1265 sregs->u.e.tcr = vcpu->arch.tcr;
1266 sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
1267 sregs->u.e.tb = tb;
1268 sregs->u.e.vrsave = vcpu->arch.vrsave;
1269}
1270
1271static int set_sregs_base(struct kvm_vcpu *vcpu,
1272 struct kvm_sregs *sregs)
1273{
1274 if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
1275 return 0;
1276
1277 vcpu->arch.csrr0 = sregs->u.e.csrr0;
1278 vcpu->arch.csrr1 = sregs->u.e.csrr1;
1279 vcpu->arch.mcsr = sregs->u.e.mcsr;
Scott Woodd30f6e42011-12-20 15:34:43 +00001280 set_guest_esr(vcpu, sregs->u.e.esr);
1281 set_guest_dear(vcpu, sregs->u.e.dear);
Scott Wood5ce941e2011-04-27 17:24:21 -05001282 vcpu->arch.vrsave = sregs->u.e.vrsave;
Scott Wooddfd4d472011-11-17 12:39:59 +00001283 kvmppc_set_tcr(vcpu, sregs->u.e.tcr);
Scott Wood5ce941e2011-04-27 17:24:21 -05001284
Scott Wooddfd4d472011-11-17 12:39:59 +00001285 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
Scott Wood5ce941e2011-04-27 17:24:21 -05001286 vcpu->arch.dec = sregs->u.e.dec;
Scott Wooddfd4d472011-11-17 12:39:59 +00001287 kvmppc_emulate_dec(vcpu);
1288 }
Scott Wood5ce941e2011-04-27 17:24:21 -05001289
1290 if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001291 u32 old_tsr = vcpu->arch.tsr;
1292
Scott Wooddfd4d472011-11-17 12:39:59 +00001293 vcpu->arch.tsr = sregs->u.e.tsr;
Bharat Bhushanf61c94b2012-08-08 20:38:19 +00001294
1295 if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
1296 arm_next_watchdog(vcpu);
1297
Scott Wooddfd4d472011-11-17 12:39:59 +00001298 update_timer_ints(vcpu);
Scott Wood5ce941e2011-04-27 17:24:21 -05001299 }
1300
1301 return 0;
1302}
1303
1304static void get_sregs_arch206(struct kvm_vcpu *vcpu,
1305 struct kvm_sregs *sregs)
1306{
1307 sregs->u.e.features |= KVM_SREGS_E_ARCH206;
1308
Scott Wood841741f2011-09-02 17:39:37 -05001309 sregs->u.e.pir = vcpu->vcpu_id;
Scott Wood5ce941e2011-04-27 17:24:21 -05001310 sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
1311 sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
1312 sregs->u.e.decar = vcpu->arch.decar;
1313 sregs->u.e.ivpr = vcpu->arch.ivpr;
1314}
1315
1316static int set_sregs_arch206(struct kvm_vcpu *vcpu,
1317 struct kvm_sregs *sregs)
1318{
1319 if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
1320 return 0;
1321
Scott Wood841741f2011-09-02 17:39:37 -05001322 if (sregs->u.e.pir != vcpu->vcpu_id)
Scott Wood5ce941e2011-04-27 17:24:21 -05001323 return -EINVAL;
1324
1325 vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
1326 vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
1327 vcpu->arch.decar = sregs->u.e.decar;
1328 vcpu->arch.ivpr = sregs->u.e.ivpr;
1329
1330 return 0;
1331}
1332
void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

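/* Import the 16 base interrupt vector offsets (IVOR0-15). */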
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

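/* KVM_GET_SREGS: gather the base, arch 2.06 and core-specific
 * register blocks for userspace. */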
int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	kvmppc_core_get_sregs(vcpu, sregs);
	return 0;
}

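/* KVM_SET_SREGS: the PVR is fixed at vcpu creation and may not change;
 * each layer applies only the feature blocks userspace marked present. */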
int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return kvmppc_core_set_sregs(vcpu, sregs);
}

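/* KVM_GET_ONE_REG: copy a single register out to the userspace buffer
 * at reg->addr. */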
int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
	case KVM_REG_PPC_IAC2:
	case KVM_REG_PPC_IAC3:
	case KVM_REG_PPC_IAC4: {
		int iac = reg->id - KVM_REG_PPC_IAC1;
		/* copy_to_user() returns the number of bytes left uncopied,
		 * not an errno, so map any failure to -EFAULT. */
		r = copy_to_user((u64 __user *)(long)reg->addr,
				 &vcpu->arch.dbg_reg.iac[iac],
				 sizeof(u64)) ? -EFAULT : 0;
		break;
	}
	case KVM_REG_PPC_DAC1:
	case KVM_REG_PPC_DAC2: {
		int dac = reg->id - KVM_REG_PPC_DAC1;
		r = copy_to_user((u64 __user *)(long)reg->addr,
				 &vcpu->arch.dbg_reg.dac[dac],
				 sizeof(u64)) ? -EFAULT : 0;
		break;
	}
	case KVM_REG_PPC_EPR: {
		u32 epr = get_guest_epr(vcpu);
		r = put_user(epr, (u32 __user *)(long)reg->addr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		r = put_user(vcpu->arch.epcr, (u32 __user *)(long)reg->addr);
		break;
#endif
	default:
		break;
	}
	return r;
}

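/* KVM_SET_ONE_REG: read a single register value from userspace and
 * install it in the vcpu. */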
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
	int r = -EINVAL;

	switch (reg->id) {
	case KVM_REG_PPC_IAC1:
	case KVM_REG_PPC_IAC2:
	case KVM_REG_PPC_IAC3:
	case KVM_REG_PPC_IAC4: {
		int iac = reg->id - KVM_REG_PPC_IAC1;
		/* As above, convert a failed copy_from_user() to -EFAULT. */
		r = copy_from_user(&vcpu->arch.dbg_reg.iac[iac],
				   (u64 __user *)(long)reg->addr,
				   sizeof(u64)) ? -EFAULT : 0;
		break;
	}
	case KVM_REG_PPC_DAC1:
	case KVM_REG_PPC_DAC2: {
		int dac = reg->id - KVM_REG_PPC_DAC1;
		r = copy_from_user(&vcpu->arch.dbg_reg.dac[dac],
				   (u64 __user *)(long)reg->addr,
				   sizeof(u64)) ? -EFAULT : 0;
		break;
	}
	case KVM_REG_PPC_EPR: {
		u32 new_epr;
		r = get_user(new_epr, (u32 __user *)(long)reg->addr);
		if (!r)
			kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr;
		r = get_user(new_epcr, (u32 __user *)(long)reg->addr);
		if (!r)
			kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	default:
		break;
	}
	return r;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	return kvmppc_core_vcpu_translate(vcpu, tr);
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

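/* Booke keeps no per-memslot bookkeeping, so the memslot hooks below
 * are no-ops. */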
void kvmppc_core_free_memslot(struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      struct kvm_userspace_memory_region *mem,
				      struct kvm_memory_slot old)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

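/* Update the guest EPCR; on HV hosts, mirror the guest's ICM (interrupt
 * computation mode) into the GICM bit of the shadow EPCR. */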
void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

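/* A TCR write can change the watchdog and decrementer configuration,
 * so re-arm the watchdog and re-evaluate pending timer interrupts. */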
void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

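/* Set TSR bits and kick the vcpu so that a newly raised timer
 * interrupt is delivered promptly. */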
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

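/* Clear TSR bits and re-evaluate timer state. */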
void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

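/* Decrementer timer callback: with auto-reload (TCR[ARE]) set, reload
 * DEC from DECAR and restart it, then post the DIS status bit. */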
void kvmppc_decrementer_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

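/* Record which physical cpu this vcpu runs on, and point the current
 * thread at the vcpu for the duration of its residency. */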
void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;
}

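/* Module init: on non-HV hosts, allocate a 64KB-aligned block and copy
 * our exception handlers into it at the host's IVOR offsets. */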
int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR only
	 * holds the upper 16 bits of the vector base, so the handlers need a
	 * 64KB-aligned (and 64KB-sized) allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		/* Track the index of the handler at the highest offset so we
		 * know how far to flush the icache below. */
		if (ivor[i] > ivor[max_ivor])
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}