/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA  02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>

#include "timing.h"
#include "booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
        { "mmio",       VCPU_STAT(mmio_exits) },
        { "dcr",        VCPU_STAT(dcr_exits) },
        { "sig",        VCPU_STAT(signal_exits) },
        { "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
        { "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
        { "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
        { "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
        { "sysc",       VCPU_STAT(syscall_exits) },
        { "isi",        VCPU_STAT(isi_exits) },
        { "dsi",        VCPU_STAT(dsi_exits) },
        { "inst_emu",   VCPU_STAT(emulated_inst_exits) },
        { "dec",        VCPU_STAT(dec_exits) },
        { "ext_intr",   VCPU_STAT(ext_intr_exits) },
        { "halt_wakeup", VCPU_STAT(halt_wakeup) },
        { "doorbell", VCPU_STAT(dbell_exits) },
        { "guest doorbell", VCPU_STAT(gdbell_exits) },
        { NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
        int i;

        printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
        printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
        printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
                                              vcpu->arch.shared->srr1);

        printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

        for (i = 0; i < 32; i += 4) {
                printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
                       kvmppc_get_gpr(vcpu, i),
                       kvmppc_get_gpr(vcpu, i+1),
                       kvmppc_get_gpr(vcpu, i+2),
                       kvmppc_get_gpr(vcpu, i+3));
        }
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_save_guest_spe(vcpu);
        vcpu->arch.shadow_msr &= ~MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
        preempt_disable();
        enable_kernel_spe();
        kvmppc_load_guest_spe(vcpu);
        vcpu->arch.shadow_msr |= MSR_SPE;
        preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
        if (vcpu->arch.shared->msr & MSR_SPE) {
                if (!(vcpu->arch.shadow_msr & MSR_SPE))
                        kvmppc_vcpu_enable_spe(vcpu);
        } else if (vcpu->arch.shadow_msr & MSR_SPE) {
                kvmppc_vcpu_disable_spe(vcpu);
        }
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Helper function for "full" MSR writes.  No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
        u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
        new_msr |= MSR_GS;
#endif

        vcpu->arch.shared->msr = new_msr;

        kvmppc_mmu_msr_notify(vcpu, old_msr);
        kvmppc_vcpu_sync_spe(vcpu);
}

static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
        set_bit(priority, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                        ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

static void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                           ulong dear_flags, ulong esr_flags)
{
        vcpu->arch.queued_dear = dear_flags;
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

static void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu,
                                           ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
        vcpu->arch.queued_esr = esr_flags;
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
        return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
        clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
        unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

        if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
                prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

        kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu,
                                  struct kvm_interrupt *irq)
{
        clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
        clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

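/*
 * Accessors for the guest's exception save/restore and fault registers.
 * With CONFIG_KVM_BOOKE_HV these are real guest SPRs (GSRR0/1, GDEAR,
 * GESR) and are accessed directly; otherwise they live in the shared
 * (kvm_vcpu_arch_shared) area.
 */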
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GSRR0, srr0);
        mtspr(SPRN_GSRR1, srr1);
#else
        vcpu->arch.shared->srr0 = srr0;
        vcpu->arch.shared->srr1 = srr1;
#endif
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.csrr0 = srr0;
        vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
                vcpu->arch.dsrr0 = srr0;
                vcpu->arch.dsrr1 = srr1;
        } else {
                set_guest_csrr(vcpu, srr0, srr1);
        }
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
        vcpu->arch.mcsrr0 = srr0;
        vcpu->arch.mcsrr1 = srr1;
}

static unsigned long get_guest_dear(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GDEAR);
#else
        return vcpu->arch.shared->dar;
#endif
}

static void set_guest_dear(struct kvm_vcpu *vcpu, unsigned long dear)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GDEAR, dear);
#else
        vcpu->arch.shared->dar = dear;
#endif
}

static unsigned long get_guest_esr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GESR);
#else
        return vcpu->arch.shared->esr;
#endif
}

static void set_guest_esr(struct kvm_vcpu *vcpu, u32 esr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GESR, esr);
#else
        vcpu->arch.shared->esr = esr;
#endif
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
        int allowed = 0;
        ulong msr_mask = 0;
        bool update_esr = false, update_dear = false;
        ulong crit_raw = vcpu->arch.shared->critical;
        ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
        bool crit;
        bool keep_irq = false;
        enum int_class int_class;

        /* Truncate crit indicators in 32 bit mode */
        if (!(vcpu->arch.shared->msr & MSR_SF)) {
                crit_raw &= 0xffffffff;
                crit_r1 &= 0xffffffff;
        }

        /* Critical section when crit == r1 */
        crit = (crit_raw == crit_r1);
        /* ... and we're in supervisor mode */
        crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

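        /*
         * Level-triggered externals are delivered like a normal external
         * interrupt, but their pending bit is kept set so the interrupt
         * stays asserted until it is explicitly dequeued.
         */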
        if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
                priority = BOOKE_IRQPRIO_EXTERNAL;
                keep_irq = true;
        }

        switch (priority) {
        case BOOKE_IRQPRIO_DTLB_MISS:
        case BOOKE_IRQPRIO_DATA_STORAGE:
                update_dear = true;
                /* fall through */
        case BOOKE_IRQPRIO_INST_STORAGE:
        case BOOKE_IRQPRIO_PROGRAM:
                update_esr = true;
                /* fall through */
        case BOOKE_IRQPRIO_ITLB_MISS:
        case BOOKE_IRQPRIO_SYSCALL:
        case BOOKE_IRQPRIO_FP_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_UNAVAIL:
        case BOOKE_IRQPRIO_SPE_FP_DATA:
        case BOOKE_IRQPRIO_SPE_FP_ROUND:
        case BOOKE_IRQPRIO_AP_UNAVAIL:
        case BOOKE_IRQPRIO_ALIGNMENT:
                allowed = 1;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_CRITICAL:
        case BOOKE_IRQPRIO_DBELL_CRIT:
                allowed = vcpu->arch.shared->msr & MSR_CE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        case BOOKE_IRQPRIO_MACHINE_CHECK:
                allowed = vcpu->arch.shared->msr & MSR_ME;
                allowed = allowed && !crit;
                int_class = INT_CLASS_MC;
                break;
        case BOOKE_IRQPRIO_DECREMENTER:
        case BOOKE_IRQPRIO_FIT:
                keep_irq = true;
                /* fall through */
        case BOOKE_IRQPRIO_EXTERNAL:
        case BOOKE_IRQPRIO_DBELL:
                allowed = vcpu->arch.shared->msr & MSR_EE;
                allowed = allowed && !crit;
                msr_mask = MSR_CE | MSR_ME | MSR_DE;
                int_class = INT_CLASS_NONCRIT;
                break;
        case BOOKE_IRQPRIO_DEBUG:
                allowed = vcpu->arch.shared->msr & MSR_DE;
                allowed = allowed && !crit;
                msr_mask = MSR_ME;
                int_class = INT_CLASS_CRIT;
                break;
        }

        if (allowed) {
                switch (int_class) {
                case INT_CLASS_NONCRIT:
                        set_guest_srr(vcpu, vcpu->arch.pc,
                                      vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_CRIT:
                        set_guest_csrr(vcpu, vcpu->arch.pc,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_DBG:
                        set_guest_dsrr(vcpu, vcpu->arch.pc,
                                       vcpu->arch.shared->msr);
                        break;
                case INT_CLASS_MC:
                        set_guest_mcsrr(vcpu, vcpu->arch.pc,
                                        vcpu->arch.shared->msr);
                        break;
                }

                vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
                if (update_esr == true)
                        set_guest_esr(vcpu, vcpu->arch.queued_esr);
                if (update_dear == true)
                        set_guest_dear(vcpu, vcpu->arch.queued_dear);
                kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

                if (!keep_irq)
                        clear_bit(priority, &vcpu->arch.pending_exceptions);
        }

#ifdef CONFIG_KVM_BOOKE_HV
        /*
         * If an interrupt is pending but masked, raise a guest doorbell
         * so that we are notified when the guest enables the relevant
         * MSR bit.
         */
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
        if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
                kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

        return allowed;
}

static void update_timer_ints(struct kvm_vcpu *vcpu)
{
        if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
                kvmppc_core_queue_dec(vcpu);
        else
                kvmppc_core_dequeue_dec(vcpu);
}

static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
        unsigned long *pending = &vcpu->arch.pending_exceptions;
        unsigned int priority;

        if (vcpu->requests) {
                if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu)) {
                        smp_mb();
                        update_timer_ints(vcpu);
                }
        }

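        /*
         * Deliver pending exceptions in priority order (lower bit number =
         * higher priority); stop at the first one that is actually taken.
         */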
        priority = __ffs(*pending);
        while (priority < BOOKE_IRQPRIO_MAX) {
                if (kvmppc_booke_irqprio_deliver(vcpu, priority))
                        break;

                priority = find_next_bit(pending,
                                         BITS_PER_BYTE * sizeof(*pending),
                                         priority + 1);
        }

        /* Tell the guest about our interrupt status */
        vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 0;
        WARN_ON_ONCE(!irqs_disabled());

        kvmppc_core_check_exceptions(vcpu);

        if (vcpu->arch.shared->msr & MSR_WE) {
                local_irq_enable();
                kvm_vcpu_block(vcpu);
                clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
                local_irq_disable();

                kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
                r = 1;
        }

        return r;
}

/*
 * Common checks before entering the guest world.  Call with interrupts
 * disabled.
 *
 * returns !0 if a signal is pending
 */
static int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu)
{
        int r = 0;

        WARN_ON_ONCE(!irqs_disabled());
        while (true) {
                if (need_resched()) {
                        local_irq_enable();
                        cond_resched();
                        local_irq_disable();
                        continue;
                }

                if (signal_pending(current)) {
                        r = 1;
                        break;
                }

                if (kvmppc_core_prepare_to_enter(vcpu)) {
                        /* interrupts got enabled in between, so we
                           are back at square 1 */
                        continue;
                }

                break;
        }

        return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
        int ret;
#ifdef CONFIG_PPC_FPU
        unsigned int fpscr;
        int fpexc_mode;
        u64 fpr[32];
#endif

        if (!vcpu->arch.sane) {
                kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
                return -EINVAL;
        }

        local_irq_disable();
        if (kvmppc_prepare_to_enter(vcpu)) {
                kvm_run->exit_reason = KVM_EXIT_INTR;
                ret = -EINTR;
                goto out;
        }

        kvm_guest_enter();

#ifdef CONFIG_PPC_FPU
        /* Save userspace FPU state in stack */
        enable_kernel_fp();
        memcpy(fpr, current->thread.fpr, sizeof(current->thread.fpr));
        fpscr = current->thread.fpscr.val;
        fpexc_mode = current->thread.fpexc_mode;

        /* Restore guest FPU state to thread */
        memcpy(current->thread.fpr, vcpu->arch.fpr, sizeof(vcpu->arch.fpr));
        current->thread.fpscr.val = vcpu->arch.fpscr;

        /*
         * Since we can't trap on MSR_FP in GS-mode, we consider the guest
         * as always using the FPU.  Kernel usage of FP (via
         * enable_kernel_fp()) in this thread must not occur while
         * vcpu->fpu_active is set.
         */
        vcpu->fpu_active = 1;

        kvmppc_load_guest_fp(vcpu);
#endif

        ret = __kvmppc_vcpu_run(kvm_run, vcpu);

#ifdef CONFIG_PPC_FPU
        kvmppc_save_guest_fp(vcpu);

        vcpu->fpu_active = 0;

        /* Save guest FPU state from thread */
        memcpy(vcpu->arch.fpr, current->thread.fpr, sizeof(vcpu->arch.fpr));
        vcpu->arch.fpscr = current->thread.fpscr.val;

        /* Restore userspace FPU state from stack */
        memcpy(current->thread.fpr, fpr, sizeof(current->thread.fpr));
        current->thread.fpscr.val = fpscr;
        current->thread.fpexc_mode = fpexc_mode;
#endif

        kvm_guest_exit();

out:
        local_irq_enable();
        return ret;
}

static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
        enum emulation_result er;

        er = kvmppc_emulate_instruction(run, vcpu);
        switch (er) {
        case EMULATE_DONE:
                /* don't overwrite subtypes, just account kvm_stats */
                kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
                /* Future optimization: only reload non-volatiles if
                 * they were actually modified by emulation. */
                return RESUME_GUEST_NV;

        case EMULATE_DO_DCR:
                run->exit_reason = KVM_EXIT_DCR;
                return RESUME_HOST;

        case EMULATE_FAIL:
                printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
                       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
                /* For debugging, encode the failing instruction and
                 * report it to userspace. */
                run->hw.hardware_exit_reason = ~0ULL << 32;
                run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
                kvmppc_core_queue_program(vcpu, ESR_PIL);
                return RESUME_HOST;

        default:
                BUG();
        }
}

static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
        ulong r1, ip, msr, lr;

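        /*
         * Capture the current stack pointer, LR, and MSR, plus the current
         * instruction address via a branch-and-link to the very next
         * instruction (which leaves that address in LR).
         */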
        asm("mr %0, 1" : "=r"(r1));
        asm("mflr %0" : "=r"(lr));
        asm("mfmsr %0" : "=r"(msr));
        asm("bl 1f; 1: mflr %0" : "=r"(ip));

        memset(regs, 0, sizeof(*regs));
        regs->gpr[1] = r1;
        regs->nip = ip;
        regs->msr = msr;
        regs->link = lr;
}

static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
                                     unsigned int exit_nr)
{
        struct pt_regs regs;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_fill_pt_regs(&regs);
                do_IRQ(&regs);
                break;
        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_fill_pt_regs(&regs);
                timer_interrupt(&regs);
                break;
#if defined(CONFIG_PPC_FSL_BOOK3E) || defined(CONFIG_PPC_BOOK3E_64)
        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_fill_pt_regs(&regs);
                doorbell_exception(&regs);
                break;
#endif
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                /* FIXME */
                break;
        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                kvmppc_fill_pt_regs(&regs);
                performance_monitor_exception(&regs);
                break;
        }
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
        int r = RESUME_HOST;

        /* update before a new last_exit_type is rewritten */
        kvmppc_update_timing_stats(vcpu);

        /* restart interrupts if they were meant for the host */
        kvmppc_restart_interrupt(vcpu, exit_nr);

        local_irq_enable();

        run->exit_reason = KVM_EXIT_UNKNOWN;
        run->ready_for_interrupt_injection = 1;

        switch (exit_nr) {
        case BOOKE_INTERRUPT_MACHINE_CHECK:
                printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
                kvmppc_dump_vcpu(vcpu);
                /* For debugging, send invalid exit reason to user space */
                run->hw.hardware_exit_reason = ~1ULL << 32;
                run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
                r = RESUME_HOST;
                break;

        case BOOKE_INTERRUPT_EXTERNAL:
                kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DECREMENTER:
                kvmppc_account_exit(vcpu, DEC_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_DOORBELL:
                kvmppc_account_exit(vcpu, DBELL_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_CE or MSR_ME was not
                 * set.  Once we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_GUEST_DBELL:
                kvmppc_account_exit(vcpu, GDBELL_EXITS);

                /*
                 * We are here because there is a pending guest interrupt
                 * which could not be delivered as MSR_EE was not set.  Once
                 * we break from here we will retry delivery.
                 */
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_HV_PRIV:
                r = emulation_exit(run, vcpu);
                break;

        case BOOKE_INTERRUPT_PROGRAM:
                if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
                        /*
                         * Program traps generated by user-level software must
                         * be handled by the guest kernel.
                         *
                         * In GS mode, hypervisor privileged instructions trap
                         * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
                         * actual program interrupts, handled by the guest.
                         */
                        kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
                        r = RESUME_GUEST;
                        kvmppc_account_exit(vcpu, USR_PR_INST);
                        break;
                }

                r = emulation_exit(run, vcpu);
                break;

        case BOOKE_INTERRUPT_FP_UNAVAIL:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
                kvmppc_account_exit(vcpu, FP_UNAVAIL);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_SPE
        case BOOKE_INTERRUPT_SPE_UNAVAIL: {
                if (vcpu->arch.shared->msr & MSR_SPE)
                        kvmppc_vcpu_enable_spe(vcpu);
                else
                        kvmppc_booke_queue_irqprio(vcpu,
                                                   BOOKE_IRQPRIO_SPE_UNAVAIL);
                r = RESUME_GUEST;
                break;
        }

        case BOOKE_INTERRUPT_SPE_FP_DATA:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SPE_UNAVAIL:
                /*
                 * Guest wants SPE, but host kernel doesn't support it.  Send
                 * an "unimplemented operation" program check to the guest.
                 */
                kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
                r = RESUME_GUEST;
                break;

        /*
         * These really should never happen without CONFIG_SPE,
         * as we should never enable the real MSR[SPE] in the guest.
         */
        case BOOKE_INTERRUPT_SPE_FP_DATA:
        case BOOKE_INTERRUPT_SPE_FP_ROUND:
                printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
                       __func__, exit_nr, vcpu->arch.pc);
                run->hw.hardware_exit_reason = exit_nr;
                r = RESUME_HOST;
                break;
#endif

        case BOOKE_INTERRUPT_DATA_STORAGE:
                kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
                                               vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, DSI_EXITS);
                r = RESUME_GUEST;
                break;

        case BOOKE_INTERRUPT_INST_STORAGE:
                kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
                kvmppc_account_exit(vcpu, ISI_EXITS);
                r = RESUME_GUEST;
                break;

#ifdef CONFIG_KVM_BOOKE_HV
        case BOOKE_INTERRUPT_HV_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR)) {
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                } else {
                        /*
                         * hcall from guest userspace -- send privileged
                         * instruction program check.
                         */
                        kvmppc_core_queue_program(vcpu, ESR_PPR);
                }

                r = RESUME_GUEST;
                break;
#else
        case BOOKE_INTERRUPT_SYSCALL:
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
                        /* KVM PV hypercalls */
                        kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
                        r = RESUME_GUEST;
                } else {
                        /* Guest syscalls */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
                }
                kvmppc_account_exit(vcpu, SYSCALL_EXITS);
                r = RESUME_GUEST;
                break;
#endif

        case BOOKE_INTERRUPT_DTLB_MISS: {
                unsigned long eaddr = vcpu->arch.fault_dear;
                int gtlb_index;
                gpa_t gpaddr;
                gfn_t gfn;

#ifdef CONFIG_KVM_E500V2
                if (!(vcpu->arch.shared->msr & MSR_PR) &&
                    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
                        kvmppc_map_magic(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;

                        break;
                }
#endif

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_core_queue_dtlb_miss(vcpu,
                                                    vcpu->arch.fault_dear,
                                                    vcpu->arch.fault_esr);
                        kvmppc_mmu_dtlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
                        r = RESUME_GUEST;
                        break;
                }

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't, and it is RAM. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                        kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
                        r = RESUME_GUEST;
                } else {
                        /* Guest has mapped and accessed a page which is not
                         * actually RAM. */
                        vcpu->arch.paddr_accessed = gpaddr;
                        vcpu->arch.vaddr_accessed = eaddr;
                        r = kvmppc_emulate_mmio(run, vcpu);
                        kvmppc_account_exit(vcpu, MMIO_EXITS);
                }

                break;
        }

        case BOOKE_INTERRUPT_ITLB_MISS: {
                unsigned long eaddr = vcpu->arch.pc;
                gpa_t gpaddr;
                gfn_t gfn;
                int gtlb_index;

                r = RESUME_GUEST;

                /* Check the guest TLB. */
                gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
                if (gtlb_index < 0) {
                        /* The guest didn't have a mapping for it. */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
                        kvmppc_mmu_itlb_miss(vcpu);
                        kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
                        break;
                }

                kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

                gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
                gfn = gpaddr >> PAGE_SHIFT;

                if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
                        /* The guest TLB had a mapping, but the shadow TLB
                         * didn't. This could be because:
                         * a) the entry is mapping the host kernel, or
                         * b) the guest used a large mapping which we're faking
                         * Either way, we need to satisfy the fault without
                         * invoking the guest. */
                        kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
                } else {
                        /* Guest mapped and leaped at non-RAM! */
                        kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
                }

                break;
        }

        case BOOKE_INTERRUPT_DEBUG: {
                u32 dbsr;

                vcpu->arch.pc = mfspr(SPRN_CSRR0);

                /* clear IAC events in DBSR register */
                dbsr = mfspr(SPRN_DBSR);
                dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
                mtspr(SPRN_DBSR, dbsr);

                run->exit_reason = KVM_EXIT_DEBUG;
                kvmppc_account_exit(vcpu, DEBUG_EXITS);
                r = RESUME_HOST;
                break;
        }

        default:
                printk(KERN_EMERG "exit_nr %d\n", exit_nr);
                BUG();
        }

        /*
         * To avoid clobbering exit_reason, only check for signals if we
         * aren't already exiting to userspace for some other reason.
         */
        if (!(r & RESUME_HOST)) {
                local_irq_disable();
                if (kvmppc_prepare_to_enter(vcpu)) {
                        run->exit_reason = KVM_EXIT_INTR;
                        r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
                        kvmppc_account_exit(vcpu, SIGNAL_EXITS);
                }
        }

        return r;
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
        int i;
        int r;

        vcpu->arch.pc = 0;
        vcpu->arch.shared->pir = vcpu->vcpu_id;
        kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
        kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
        vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS;
        vcpu->arch.shadow_pid = 1;
        vcpu->arch.shared->msr = 0;
#endif

        /* Eye-catching numbers so we know if the guest takes an interrupt
         * before it's programmed its own IVPR/IVORs. */
        vcpu->arch.ivpr = 0x55550000;
        for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
                vcpu->arch.ivor[i] = 0x7700 | i * 4;

        kvmppc_init_timing_stats(vcpu);

        r = kvmppc_core_vcpu_setup(vcpu);
        kvmppc_sanity_check(vcpu);
        return r;
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        regs->pc = vcpu->arch.pc;
        regs->cr = kvmppc_get_cr(vcpu);
        regs->ctr = vcpu->arch.ctr;
        regs->lr = vcpu->arch.lr;
        regs->xer = kvmppc_get_xer(vcpu);
        regs->msr = vcpu->arch.shared->msr;
        regs->srr0 = vcpu->arch.shared->srr0;
        regs->srr1 = vcpu->arch.shared->srr1;
        regs->pid = vcpu->arch.pid;
        regs->sprg0 = vcpu->arch.shared->sprg0;
        regs->sprg1 = vcpu->arch.shared->sprg1;
        regs->sprg2 = vcpu->arch.shared->sprg2;
        regs->sprg3 = vcpu->arch.shared->sprg3;
        regs->sprg4 = vcpu->arch.shared->sprg4;
        regs->sprg5 = vcpu->arch.shared->sprg5;
        regs->sprg6 = vcpu->arch.shared->sprg6;
        regs->sprg7 = vcpu->arch.shared->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

        return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
        int i;

        vcpu->arch.pc = regs->pc;
        kvmppc_set_cr(vcpu, regs->cr);
        vcpu->arch.ctr = regs->ctr;
        vcpu->arch.lr = regs->lr;
        kvmppc_set_xer(vcpu, regs->xer);
        kvmppc_set_msr(vcpu, regs->msr);
        vcpu->arch.shared->srr0 = regs->srr0;
        vcpu->arch.shared->srr1 = regs->srr1;
        kvmppc_set_pid(vcpu, regs->pid);
        vcpu->arch.shared->sprg0 = regs->sprg0;
        vcpu->arch.shared->sprg1 = regs->sprg1;
        vcpu->arch.shared->sprg2 = regs->sprg2;
        vcpu->arch.shared->sprg3 = regs->sprg3;
        vcpu->arch.shared->sprg4 = regs->sprg4;
        vcpu->arch.shared->sprg5 = regs->sprg5;
        vcpu->arch.shared->sprg6 = regs->sprg6;
        vcpu->arch.shared->sprg7 = regs->sprg7;

        for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
                kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

        return 0;
}

static void get_sregs_base(struct kvm_vcpu *vcpu,
                           struct kvm_sregs *sregs)
{
        u64 tb = get_tb();

        sregs->u.e.features |= KVM_SREGS_E_BASE;

        sregs->u.e.csrr0 = vcpu->arch.csrr0;
        sregs->u.e.csrr1 = vcpu->arch.csrr1;
        sregs->u.e.mcsr = vcpu->arch.mcsr;
        sregs->u.e.esr = get_guest_esr(vcpu);
        sregs->u.e.dear = get_guest_dear(vcpu);
        sregs->u.e.tsr = vcpu->arch.tsr;
        sregs->u.e.tcr = vcpu->arch.tcr;
        sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
        sregs->u.e.tb = tb;
        sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
                          struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
                return 0;

        vcpu->arch.csrr0 = sregs->u.e.csrr0;
        vcpu->arch.csrr1 = sregs->u.e.csrr1;
        vcpu->arch.mcsr = sregs->u.e.mcsr;
        set_guest_esr(vcpu, sregs->u.e.esr);
        set_guest_dear(vcpu, sregs->u.e.dear);
        vcpu->arch.vrsave = sregs->u.e.vrsave;
        kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
                vcpu->arch.dec = sregs->u.e.dec;
                kvmppc_emulate_dec(vcpu);
        }

        if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR) {
                vcpu->arch.tsr = sregs->u.e.tsr;
                update_timer_ints(vcpu);
        }

        return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
                              struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_ARCH206;

        sregs->u.e.pir = vcpu->vcpu_id;
        sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
        sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
        sregs->u.e.decar = vcpu->arch.decar;
        sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
                             struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
                return 0;

        if (sregs->u.e.pir != vcpu->vcpu_id)
                return -EINVAL;

        vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
        vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
        vcpu->arch.decar = sregs->u.e.decar;
        vcpu->arch.ivpr = sregs->u.e.ivpr;

        return 0;
}

void kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        sregs->u.e.features |= KVM_SREGS_E_IVOR;

        sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
        sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
        sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
        sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
        sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
        sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
        sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
        sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
        sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
        sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
        sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
        sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
        sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
        sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
        sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
        sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
        if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
                return 0;

        vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
        vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
        vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
        vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
        vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
        vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
        vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
        vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
        vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
        vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
        vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

        return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        sregs->pvr = vcpu->arch.pvr;

        get_sregs_base(vcpu, sregs);
        get_sregs_arch206(vcpu, sregs);
        kvmppc_core_get_sregs(vcpu, sregs);
        return 0;
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
                                  struct kvm_sregs *sregs)
{
        int ret;

        if (vcpu->arch.pvr != sregs->pvr)
                return -EINVAL;

        ret = set_sregs_base(vcpu, sregs);
        if (ret < 0)
                return ret;

        ret = set_sregs_arch206(vcpu, sregs);
        if (ret < 0)
                return ret;

        return kvmppc_core_set_sregs(vcpu, sregs);
}

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        return -EINVAL;
}

int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg)
{
        return -EINVAL;
}

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
        return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
                                  struct kvm_translation *tr)
{
        int r;

        r = kvmppc_core_vcpu_translate(vcpu, tr);
        return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
        return -ENOTSUPP;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
        return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                      struct kvm_userspace_memory_region *mem)
{
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
        vcpu->arch.tcr = new_tcr;
        update_timer_ints(vcpu);
}

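/*
 * Setting TSR bits (e.g. TSR[DIS] from the decrementer timer below) must be
 * visible before the vcpu is poked to re-evaluate its timer interrupts,
 * hence the write barrier, pending-timer request, and kick.
 */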
void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        set_bits(tsr_bits, &vcpu->arch.tsr);
        smp_wmb();
        kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
        kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
        clear_bits(tsr_bits, &vcpu->arch.tsr);
        update_timer_ints(vcpu);
}

void kvmppc_decrementer_func(unsigned long data)
{
        struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;

        kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
        current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
        current->thread.kvm_vcpu = NULL;
}

int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
        unsigned long ivor[16];
        unsigned long max_ivor = 0;
        int i;

        /* We install our own exception handlers by hijacking IVPR. IVPR must
         * be 16-bit aligned, so we need a 64KB allocation. */
        kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
                                                 VCPU_SIZE_ORDER);
        if (!kvmppc_booke_handlers)
                return -ENOMEM;

        /* XXX make sure our handlers are smaller than Linux's */

        /* Copy our interrupt handlers to match host IVORs. That way we don't
         * have to swap the IVORs on every guest/host transition. */
        ivor[0] = mfspr(SPRN_IVOR0);
        ivor[1] = mfspr(SPRN_IVOR1);
        ivor[2] = mfspr(SPRN_IVOR2);
        ivor[3] = mfspr(SPRN_IVOR3);
        ivor[4] = mfspr(SPRN_IVOR4);
        ivor[5] = mfspr(SPRN_IVOR5);
        ivor[6] = mfspr(SPRN_IVOR6);
        ivor[7] = mfspr(SPRN_IVOR7);
        ivor[8] = mfspr(SPRN_IVOR8);
        ivor[9] = mfspr(SPRN_IVOR9);
        ivor[10] = mfspr(SPRN_IVOR10);
        ivor[11] = mfspr(SPRN_IVOR11);
        ivor[12] = mfspr(SPRN_IVOR12);
        ivor[13] = mfspr(SPRN_IVOR13);
        ivor[14] = mfspr(SPRN_IVOR14);
        ivor[15] = mfspr(SPRN_IVOR15);

        for (i = 0; i < 16; i++) {
                if (ivor[i] > max_ivor)
                        max_ivor = ivor[i];

                memcpy((void *)kvmppc_booke_handlers + ivor[i],
                       kvmppc_handlers_start + i * kvmppc_handler_len,
                       kvmppc_handler_len);
        }
        flush_icache_range(kvmppc_booke_handlers,
                           kvmppc_booke_handlers + max_ivor + kvmppc_handler_len);
#endif /* !BOOKE_HV */
        return 0;
}

void __exit kvmppc_booke_exit(void)
{
        free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
        kvm_exit();
}