/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2007
 * Copyright 2010-2011 Freescale Semiconductor, Inc.
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 *          Christian Ehrhardt <ehrhardt@linux.vnet.ibm.com>
 *          Scott Wood <scottwood@freescale.com>
 *          Varun Sethi <varun.sethi@freescale.com>
 */

#include <linux/errno.h>
#include <linux/err.h>
#include <linux/kvm_host.h>
#include <linux/gfp.h>
#include <linux/module.h>
#include <linux/vmalloc.h>
#include <linux/fs.h>

#include <asm/cputable.h>
#include <asm/uaccess.h>
#include <asm/kvm_ppc.h>
#include <asm/cacheflush.h>
#include <asm/dbell.h>
#include <asm/hw_irq.h>
#include <asm/irq.h>
#include <asm/time.h>

#include "timing.h"
#include "booke.h"

#define CREATE_TRACE_POINTS
#include "trace_booke.h"

unsigned long kvmppc_booke_handlers;

#define VM_STAT(x) offsetof(struct kvm, stat.x), KVM_STAT_VM
#define VCPU_STAT(x) offsetof(struct kvm_vcpu, stat.x), KVM_STAT_VCPU

struct kvm_stats_debugfs_item debugfs_entries[] = {
	{ "mmio",       VCPU_STAT(mmio_exits) },
	{ "sig",        VCPU_STAT(signal_exits) },
	{ "itlb_r",     VCPU_STAT(itlb_real_miss_exits) },
	{ "itlb_v",     VCPU_STAT(itlb_virt_miss_exits) },
	{ "dtlb_r",     VCPU_STAT(dtlb_real_miss_exits) },
	{ "dtlb_v",     VCPU_STAT(dtlb_virt_miss_exits) },
	{ "sysc",       VCPU_STAT(syscall_exits) },
	{ "isi",        VCPU_STAT(isi_exits) },
	{ "dsi",        VCPU_STAT(dsi_exits) },
	{ "inst_emu",   VCPU_STAT(emulated_inst_exits) },
	{ "dec",        VCPU_STAT(dec_exits) },
	{ "ext_intr",   VCPU_STAT(ext_intr_exits) },
	{ "halt_successful_poll", VCPU_STAT(halt_successful_poll) },
	{ "halt_wakeup", VCPU_STAT(halt_wakeup) },
	{ "doorbell", VCPU_STAT(dbell_exits) },
	{ "guest doorbell", VCPU_STAT(gdbell_exits) },
	{ "remote_tlb_flush", VM_STAT(remote_tlb_flush) },
	{ NULL }
};

/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08llx\n", vcpu->arch.pc, vcpu->arch.shared->msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08llx srr1: %08llx\n", vcpu->arch.shared->srr0,
	       vcpu->arch.shared->srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}

#ifdef CONFIG_SPE
void kvmppc_vcpu_disable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_save_guest_spe(vcpu);
	vcpu->arch.shadow_msr &= ~MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_enable_spe(struct kvm_vcpu *vcpu)
{
	preempt_disable();
	enable_kernel_spe();
	kvmppc_load_guest_spe(vcpu);
	vcpu->arch.shadow_msr |= MSR_SPE;
	preempt_enable();
}

static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.shared->msr & MSR_SPE) {
		if (!(vcpu->arch.shadow_msr & MSR_SPE))
			kvmppc_vcpu_enable_spe(vcpu);
	} else if (vcpu->arch.shadow_msr & MSR_SPE) {
		kvmppc_vcpu_disable_spe(vcpu);
	}
}
#else
static void kvmppc_vcpu_sync_spe(struct kvm_vcpu *vcpu)
{
}
#endif

/*
 * Load up guest vcpu FP state if it's needed.
 * It also sets MSR_FP in the thread so that the host knows we are
 * holding the FPU, and the host can then help to save the guest vcpu
 * FP state if other threads need the FPU.
 * This simulates an FP unavailable fault.
 *
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (!(current->thread.regs->msr & MSR_FP)) {
		enable_kernel_fp();
		load_fp_state(&vcpu->arch.fp);
		current->thread.fp_save_area = &vcpu->arch.fp;
		current->thread.regs->msr |= MSR_FP;
	}
#endif
}

/*
 * Save the guest vcpu FP state into the thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_fp(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_PPC_FPU
	if (current->thread.regs->msr & MSR_FP)
		giveup_fpu(current);
	current->thread.fp_save_area = NULL;
#endif
}

static void kvmppc_vcpu_sync_fpu(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_FPU) && !defined(CONFIG_KVM_BOOKE_HV)
	/* We always treat the FP bit as enabled from the host
	   perspective, so only need to adjust the shadow MSR */
	vcpu->arch.shadow_msr &= ~MSR_FP;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_FP;
#endif
}

/*
 * Simulate an AltiVec unavailable fault to load guest state
 * from the thread to the AltiVec unit.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_load_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (!(current->thread.regs->msr & MSR_VEC)) {
			enable_kernel_altivec();
			load_vr_state(&vcpu->arch.vr);
			current->thread.vr_save_area = &vcpu->arch.vr;
			current->thread.regs->msr |= MSR_VEC;
		}
	}
#endif
}

/*
 * Save the guest vcpu AltiVec state into the thread.
 * It must be called with preemption disabled.
 */
static inline void kvmppc_save_guest_altivec(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_ALTIVEC
	if (cpu_has_feature(CPU_FTR_ALTIVEC)) {
		if (current->thread.regs->msr & MSR_VEC)
			giveup_altivec(current);
		current->thread.vr_save_area = NULL;
	}
#endif
}

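/*
 * Propagate the guest's MSR[DE] into the shadow MSR, or force-enable
 * debug interrupts when userspace is debugging the guest.
 */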
static void kvmppc_vcpu_sync_debug(struct kvm_vcpu *vcpu)
{
	/* Synchronize guest's desire to get debug interrupts into shadow MSR */
#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr &= ~MSR_DE;
	vcpu->arch.shadow_msr |= vcpu->arch.shared->msr & MSR_DE;
#endif

	/* Force enable debug interrupts when user space wants to debug */
	if (vcpu->guest_debug) {
#ifdef CONFIG_KVM_BOOKE_HV
		/*
		 * Since there is no shadow MSR, sync MSR_DE into the guest
		 * visible MSR.
		 */
		vcpu->arch.shared->msr |= MSR_DE;
#else
		vcpu->arch.shadow_msr |= MSR_DE;
		vcpu->arch.shared->msr &= ~MSR_DE;
#endif
	}
}

/*
 * Helper function for "full" MSR writes. No need to call this if only
 * EE/CE/ME/DE/RI are changing.
 */
void kvmppc_set_msr(struct kvm_vcpu *vcpu, u32 new_msr)
{
	u32 old_msr = vcpu->arch.shared->msr;

#ifdef CONFIG_KVM_BOOKE_HV
	new_msr |= MSR_GS;
#endif

	vcpu->arch.shared->msr = new_msr;

	kvmppc_mmu_msr_notify(vcpu, old_msr);
	kvmppc_vcpu_sync_spe(vcpu);
	kvmppc_vcpu_sync_fpu(vcpu);
	kvmppc_vcpu_sync_debug(vcpu);
}

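/*
 * Mark an exception of the given priority as pending; actual delivery
 * happens later via kvmppc_core_prepare_to_enter().
 */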
static void kvmppc_booke_queue_irqprio(struct kvm_vcpu *vcpu,
                                       unsigned int priority)
{
	trace_kvm_booke_queue_irqprio(vcpu, priority);
	set_bit(priority, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_dtlb_miss(struct kvm_vcpu *vcpu,
                                 ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DTLB_MISS);
}

void kvmppc_core_queue_data_storage(struct kvm_vcpu *vcpu,
                                    ulong dear_flags, ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DATA_STORAGE);
}

void kvmppc_core_queue_itlb_miss(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
}

void kvmppc_core_queue_inst_storage(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_INST_STORAGE);
}

static void kvmppc_core_queue_alignment(struct kvm_vcpu *vcpu, ulong dear_flags,
                                        ulong esr_flags)
{
	vcpu->arch.queued_dear = dear_flags;
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALIGNMENT);
}

void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong esr_flags)
{
	vcpu->arch.queued_esr = esr_flags;
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_PROGRAM);
}

void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DECREMENTER);
}

int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu)
{
	return test_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DECREMENTER, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                struct kvm_interrupt *irq)
{
	unsigned int prio = BOOKE_IRQPRIO_EXTERNAL;

	if (irq->irq == KVM_INTERRUPT_SET_LEVEL)
		prio = BOOKE_IRQPRIO_EXTERNAL_LEVEL;

	kvmppc_booke_queue_irqprio(vcpu, prio);
}

void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_EXTERNAL, &vcpu->arch.pending_exceptions);
	clear_bit(BOOKE_IRQPRIO_EXTERNAL_LEVEL, &vcpu->arch.pending_exceptions);
}

static void kvmppc_core_queue_watchdog(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_WATCHDOG);
}

static void kvmppc_core_dequeue_watchdog(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_WATCHDOG, &vcpu->arch.pending_exceptions);
}

void kvmppc_core_queue_debug(struct kvm_vcpu *vcpu)
{
	kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_DEBUG);
}

void kvmppc_core_dequeue_debug(struct kvm_vcpu *vcpu)
{
	clear_bit(BOOKE_IRQPRIO_DEBUG, &vcpu->arch.pending_exceptions);
}

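/*
 * Helpers that stash the interrupted PC and MSR into the save/restore
 * register pair used by each interrupt class (SRR, CSRR, DSRR, MCSRR).
 */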
static void set_guest_srr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	kvmppc_set_srr0(vcpu, srr0);
	kvmppc_set_srr1(vcpu, srr1);
}

static void set_guest_csrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.csrr0 = srr0;
	vcpu->arch.csrr1 = srr1;
}

static void set_guest_dsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC)) {
		vcpu->arch.dsrr0 = srr0;
		vcpu->arch.dsrr1 = srr1;
	} else {
		set_guest_csrr(vcpu, srr0, srr1);
	}
}

static void set_guest_mcsrr(struct kvm_vcpu *vcpu, unsigned long srr0, u32 srr1)
{
	vcpu->arch.mcsrr0 = srr0;
	vcpu->arch.mcsrr1 = srr1;
}

/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
#ifdef CONFIG_SPE_POSSIBLE
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
#endif
#ifdef CONFIG_ALTIVEC
	case BOOKE_IRQPRIO_ALTIVEC_UNAVAIL:
	case BOOKE_IRQPRIO_ALTIVEC_ASSIST:
#endif
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		if (cpu_has_feature(CPU_FTR_DEBUG_LVL_EXC))
			int_class = INT_CLASS_DBG;
		else
			int_class = INT_CLASS_CRIT;

		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			kvmppc_set_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			kvmppc_set_dar(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQPRIO_MACHINE_CHECK)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}

/*
 * Return the number of jiffies until the next timeout. If the timeout is
 * longer than NEXT_TIMER_MAX_DELTA, then return NEXT_TIMER_MAX_DELTA
 * because the larger value can break the timer APIs.
 */
static unsigned long watchdog_next_timeout(struct kvm_vcpu *vcpu)
{
	u64 tb, wdt_tb, wdt_ticks = 0;
	u64 nr_jiffies = 0;
	u32 period = TCR_GET_WP(vcpu->arch.tcr);

	wdt_tb = 1ULL << (63 - period);
	tb = get_tb();
	/*
	 * The watchdog timeout will happen when the TB bit corresponding
	 * to the watchdog toggles from 0 to 1.
	 */
	if (tb & wdt_tb)
		wdt_ticks = wdt_tb;

	wdt_ticks += wdt_tb - (tb & (wdt_tb - 1));

	/* Convert timebase ticks to jiffies */
	nr_jiffies = wdt_ticks;

	if (do_div(nr_jiffies, tb_ticks_per_jiffy))
		nr_jiffies++;

	return min_t(unsigned long long, nr_jiffies, NEXT_TIMER_MAX_DELTA);
}

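/*
 * (Re)arm the host timer that emulates the guest watchdog, or cancel it
 * when the next timeout is too far away for the timer API.
 */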
static void arm_next_watchdog(struct kvm_vcpu *vcpu)
{
	unsigned long nr_jiffies;
	unsigned long flags;

	/*
	 * If TSR_ENW and TSR_WIS are not set then there is no need to exit
	 * to userspace, so clear the KVM_REQ_WATCHDOG request.
	 */
	if ((vcpu->arch.tsr & (TSR_ENW | TSR_WIS)) != (TSR_ENW | TSR_WIS))
		clear_bit(KVM_REQ_WATCHDOG, &vcpu->requests);

	spin_lock_irqsave(&vcpu->arch.wdt_lock, flags);
	nr_jiffies = watchdog_next_timeout(vcpu);
	/*
	 * If the number of jiffies until the watchdog timeout is
	 * >= NEXT_TIMER_MAX_DELTA, then do not run the watchdog timer,
	 * as this can break the timer APIs.
	 */
	if (nr_jiffies < NEXT_TIMER_MAX_DELTA)
		mod_timer(&vcpu->arch.wdt_timer, jiffies + nr_jiffies);
	else
		del_timer(&vcpu->arch.wdt_timer);
	spin_unlock_irqrestore(&vcpu->arch.wdt_lock, flags);
}

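/*
 * Host timer callback that emulates the guest watchdog: advance the
 * TSR[ENW]/TSR[WIS] state atomically and, on final expiry, notify the
 * guest or exit to userspace as configured in TCR[WRC].
 */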
void kvmppc_watchdog_func(unsigned long data)
{
	struct kvm_vcpu *vcpu = (struct kvm_vcpu *)data;
	u32 tsr, new_tsr;
	int final;

	do {
		new_tsr = tsr = vcpu->arch.tsr;
		final = 0;

		/* Time out event */
		if (tsr & TSR_ENW) {
			if (tsr & TSR_WIS)
				final = 1;
			else
				new_tsr = tsr | TSR_WIS;
		} else {
			new_tsr = tsr | TSR_ENW;
		}
	} while (cmpxchg(&vcpu->arch.tsr, tsr, new_tsr) != tsr);

	if (new_tsr & TSR_WIS) {
		smp_wmb();
		kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * If this is the final watchdog expiry and some action is required,
	 * then exit to userspace.
	 */
	if (final && (vcpu->arch.tcr & TCR_WRC_MASK) &&
	    vcpu->arch.watchdog_enabled) {
		smp_wmb();
		kvm_make_request(KVM_REQ_WATCHDOG, vcpu);
		kvm_vcpu_kick(vcpu);
	}

	/*
	 * Stop running the watchdog timer after the final expiration to
	 * prevent the host from being flooded with timers if the
	 * guest sets a short period.
	 * Timers will resume when TSR/TCR is updated next time.
	 */
	if (!final)
		arm_next_watchdog(vcpu);
}

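/*
 * Reconcile pending decrementer and watchdog interrupts with the
 * current TCR enable bits and TSR status bits.
 */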
static void update_timer_ints(struct kvm_vcpu *vcpu)
{
	if ((vcpu->arch.tcr & TCR_DIE) && (vcpu->arch.tsr & TSR_DIS))
		kvmppc_core_queue_dec(vcpu);
	else
		kvmppc_core_dequeue_dec(vcpu);

	if ((vcpu->arch.tcr & TCR_WIE) && (vcpu->arch.tsr & TSR_WIS))
		kvmppc_core_queue_watchdog(vcpu);
	else
		kvmppc_core_dequeue_watchdog(vcpu);
}

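/*
 * Scan the pending exception bitmap in priority order, deliver the first
 * exception that is currently deliverable, and mirror the remaining
 * pending state into the shared area for the guest.
 */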
static void kvmppc_core_check_exceptions(struct kvm_vcpu *vcpu)
{
	unsigned long *pending = &vcpu->arch.pending_exceptions;
	unsigned int priority;

	priority = __ffs(*pending);
	while (priority < BOOKE_IRQPRIO_MAX) {
		if (kvmppc_booke_irqprio_deliver(vcpu, priority))
			break;

		priority = find_next_bit(pending,
		                         BITS_PER_BYTE * sizeof(*pending),
		                         priority + 1);
	}

	/* Tell the guest about our interrupt status */
	vcpu->arch.shared->int_pending = !!*pending;
}

/* Check pending exceptions and deliver one, if possible. */
int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu)
{
	int r = 0;
	WARN_ON_ONCE(!irqs_disabled());

	kvmppc_core_check_exceptions(vcpu);

	if (vcpu->requests) {
		/* Exception delivery raised request; start over */
		return 1;
	}

	if (vcpu->arch.shared->msr & MSR_WE) {
		local_irq_enable();
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		hard_irq_disable();

		kvmppc_set_exit_type(vcpu, EMULATED_MTMSRWE_EXITS);
		r = 1;
	};

	return r;
}

int kvmppc_core_check_requests(struct kvm_vcpu *vcpu)
{
	int r = 1; /* Indicate we want to get back into the guest */

	if (kvm_check_request(KVM_REQ_PENDING_TIMER, vcpu))
		update_timer_ints(vcpu);
#if defined(CONFIG_KVM_E500V2) || defined(CONFIG_KVM_E500MC)
	if (kvm_check_request(KVM_REQ_TLB_FLUSH, vcpu))
		kvmppc_core_flush_tlb(vcpu);
#endif

	if (kvm_check_request(KVM_REQ_WATCHDOG, vcpu)) {
		vcpu->run->exit_reason = KVM_EXIT_WATCHDOG;
		r = 0;
	}

	if (kvm_check_request(KVM_REQ_EPR_EXIT, vcpu)) {
		vcpu->run->epr.epr = 0;
		vcpu->arch.epr_needed = true;
		vcpu->run->exit_reason = KVM_EXIT_EPR;
		r = 0;
	}

	return r;
}

int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
{
	int ret, s;
	struct debug_reg debug;

	if (!vcpu->arch.sane) {
		kvm_run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
		return -EINVAL;
	}

	s = kvmppc_prepare_to_enter(vcpu);
	if (s <= 0) {
		ret = s;
		goto out;
	}
	/* interrupts now hard-disabled */

#ifdef CONFIG_PPC_FPU
	/* Save userspace FPU state in stack */
	enable_kernel_fp();

	/*
	 * Since we can't trap on MSR_FP in GS-mode, we consider the guest
	 * as always using the FPU.
	 */
	kvmppc_load_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	/* Save userspace AltiVec state in stack */
	if (cpu_has_feature(CPU_FTR_ALTIVEC))
		enable_kernel_altivec();
	/*
	 * Since we can't trap on MSR_VEC in GS-mode, we consider the guest
	 * as always using AltiVec.
	 */
	kvmppc_load_guest_altivec(vcpu);
#endif

	/* Switch to guest debug context */
	debug = vcpu->arch.dbg_reg;
	switch_booke_debug_regs(&debug);
	debug = current->thread.debug;
	current->thread.debug = vcpu->arch.dbg_reg;

	vcpu->arch.pgdir = current->mm->pgd;
	kvmppc_fix_ee_before_entry();

	ret = __kvmppc_vcpu_run(kvm_run, vcpu);

	/* No need for kvm_guest_exit. It's done in handle_exit.
	   We also get here with interrupts enabled. */

	/* Switch back to user space debug context */
	switch_booke_debug_regs(&debug);
	current->thread.debug = debug;

#ifdef CONFIG_PPC_FPU
	kvmppc_save_guest_fp(vcpu);
#endif

#ifdef CONFIG_ALTIVEC
	kvmppc_save_guest_altivec(vcpu);
#endif

out:
	vcpu->mode = OUTSIDE_GUEST_MODE;
	return ret;
}

static int emulation_exit(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* don't overwrite subtypes, just account kvm_stats */
		kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
		/* Future optimization: only reload non-volatiles if
		 * they were actually modified by emulation. */
		return RESUME_GUEST_NV;

	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
		       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	case EMULATE_EXIT_USER:
		return RESUME_HOST;

	default:
		BUG();
	}
}

static int kvmppc_handle_debug(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	struct debug_reg *dbg_reg = &(vcpu->arch.dbg_reg);
	u32 dbsr = vcpu->arch.dbsr;

	if (vcpu->guest_debug == 0) {
		/*
		 * Debug resources belong to the guest.
		 * Imprecise debug events are not injected.
		 */
		if (dbsr & DBSR_IDE) {
			dbsr &= ~DBSR_IDE;
			if (!dbsr)
				return RESUME_GUEST;
		}

		if (dbsr && (vcpu->arch.shared->msr & MSR_DE) &&
			    (vcpu->arch.dbg_reg.dbcr0 & DBCR0_IDM))
			kvmppc_core_queue_debug(vcpu);

		/* Inject a program interrupt if trap debug is not allowed */
		if ((dbsr & DBSR_TIE) && !(vcpu->arch.shared->msr & MSR_DE))
			kvmppc_core_queue_program(vcpu, ESR_PTR);

		return RESUME_GUEST;
	}

	/*
	 * Debug resources are owned by userspace.
	 * Clear the guest dbsr (vcpu->arch.dbsr).
	 */
	vcpu->arch.dbsr = 0;
	run->debug.arch.status = 0;
	run->debug.arch.address = vcpu->arch.pc;

	if (dbsr & (DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4)) {
		run->debug.arch.status |= KVMPPC_DEBUG_BREAKPOINT;
	} else {
		if (dbsr & (DBSR_DAC1W | DBSR_DAC2W))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_WRITE;
		else if (dbsr & (DBSR_DAC1R | DBSR_DAC2R))
			run->debug.arch.status |= KVMPPC_DEBUG_WATCH_READ;
		if (dbsr & (DBSR_DAC1R | DBSR_DAC1W))
			run->debug.arch.address = dbg_reg->dac1;
		else if (dbsr & (DBSR_DAC2R | DBSR_DAC2W))
			run->debug.arch.address = dbg_reg->dac2;
	}

	return RESUME_HOST;
}

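/*
 * Build a minimal pt_regs describing the current host context so that
 * host exception handlers can be invoked from KVM exit handling.
 */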
static void kvmppc_fill_pt_regs(struct pt_regs *regs)
{
	ulong r1, ip, msr, lr;

	asm("mr %0, 1" : "=r"(r1));
	asm("mflr %0" : "=r"(lr));
	asm("mfmsr %0" : "=r"(msr));
	asm("bl 1f; 1: mflr %0" : "=r"(ip));

	memset(regs, 0, sizeof(*regs));
	regs->gpr[1] = r1;
	regs->nip = ip;
	regs->msr = msr;
	regs->link = lr;
}

/*
 * Interrupts that need to be handled by host interrupt handlers have
 * the corresponding host handler called from here, in a way similar
 * (but not identical) to how they are called from the low-level
 * handlers (such as arch/powerpc/kernel/head_fsl_booke.S).
 */
static void kvmppc_restart_interrupt(struct kvm_vcpu *vcpu,
				     unsigned int exit_nr)
{
	struct pt_regs regs;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_fill_pt_regs(&regs);
		do_IRQ(&regs);
		break;
	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_fill_pt_regs(&regs);
		timer_interrupt(&regs);
		break;
#if defined(CONFIG_PPC_DOORBELL)
	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_fill_pt_regs(&regs);
		doorbell_exception(&regs);
		break;
#endif
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		/* FIXME */
		break;
	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		kvmppc_fill_pt_regs(&regs);
		performance_monitor_exception(&regs);
		break;
	case BOOKE_INTERRUPT_WATCHDOG:
		kvmppc_fill_pt_regs(&regs);
#ifdef CONFIG_BOOKE_WDT
		WatchdogException(&regs);
#else
		unknown_exception(&regs);
#endif
		break;
	case BOOKE_INTERRUPT_CRITICAL:
		kvmppc_fill_pt_regs(&regs);
		unknown_exception(&regs);
		break;
	case BOOKE_INTERRUPT_DEBUG:
		/* Save DBSR before preemption is enabled */
		vcpu->arch.dbsr = mfspr(SPRN_DBSR);
		kvmppc_clear_dbsr();
		break;
	}
}

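/*
 * Decide how to resume when the pre-exit fetch of the last guest
 * instruction did not complete successfully.
 */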
static int kvmppc_resume_inst_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
				   enum emulation_result emulated, u32 last_inst)
{
	switch (emulated) {
	case EMULATE_AGAIN:
		return RESUME_GUEST;

	case EMULATE_FAIL:
		pr_debug("%s: load instruction from guest address %lx failed\n",
		       __func__, vcpu->arch.pc);
		/* For debugging, encode the failing instruction and
		 * report it to userspace. */
		run->hw.hardware_exit_reason = ~0ULL << 32;
		run->hw.hardware_exit_reason |= last_inst;
		kvmppc_core_queue_program(vcpu, ESR_PIL);
		return RESUME_HOST;

	default:
		BUG();
	}
}

/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	int r = RESUME_HOST;
	int s;
	int idx;
	u32 last_inst = KVM_INST_FETCH_FAILED;
	enum emulation_result emulated = EMULATE_DONE;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	/* restart interrupts if they were meant for the host */
	kvmppc_restart_interrupt(vcpu, exit_nr);

	/*
	 * Get the last instruction before being preempted.
	 * TODO: for e6500, also check BOOKE_INTERRUPT_LRAT_ERROR & ESR_DATA.
	 */
	switch (exit_nr) {
	case BOOKE_INTERRUPT_DATA_STORAGE:
	case BOOKE_INTERRUPT_DTLB_MISS:
	case BOOKE_INTERRUPT_HV_PRIV:
		emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	case BOOKE_INTERRUPT_PROGRAM:
		/* SW breakpoints arrive as illegal instructions on HV */
		if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
			emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &last_inst);
		break;
	default:
		break;
	}

	trace_kvm_exit(exit_nr, vcpu);
	__kvm_guest_exit();

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	if (emulated != EMULATE_DONE) {
		r = kvmppc_resume_inst_load(run, vcpu, emulated, last_inst);
		goto out;
	}

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		/* For debugging, send invalid exit reason to user space */
		run->hw.hardware_exit_reason = ~1ULL << 32;
		run->hw.hardware_exit_reason |= mfspr(SPRN_MCSR);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		kvmppc_account_exit(vcpu, DEC_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_WATCHDOG:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DOORBELL:
		kvmppc_account_exit(vcpu, DBELL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL_CRIT:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_CE or MSR_ME was not
		 * set. Once we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_GUEST_DBELL:
		kvmppc_account_exit(vcpu, GDBELL_EXITS);

		/*
		 * We are here because there is a pending guest interrupt
		 * which could not be delivered as MSR_EE was not set. Once
		 * we break from here we will retry delivery.
		 */
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PERFORMANCE_MONITOR:
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_HV_PRIV:
		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if ((vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP) &&
			(last_inst == KVMPPC_INST_SW_BREAKPOINT)) {
			/*
			 * We are here because of an SW breakpoint instruction,
			 * so let's return to the host to handle it.
			 */
			r = kvmppc_handle_debug(run, vcpu);
			run->exit_reason = KVM_EXIT_DEBUG;
			kvmppc_account_exit(vcpu, DEBUG_EXITS);
			break;
		}

		if (vcpu->arch.shared->msr & (MSR_PR | MSR_GS)) {
			/*
			 * Program traps generated by user-level software must
			 * be handled by the guest kernel.
			 *
			 * In GS mode, hypervisor privileged instructions trap
			 * on BOOKE_INTERRUPT_HV_PRIV, not here, so these are
			 * actual program interrupts, handled by the guest.
			 */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		r = emulation_exit(run, vcpu);
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#elif defined(CONFIG_SPE_POSSIBLE)
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it. Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif /* CONFIG_SPE_POSSIBLE */

1160/*
1161 * On cores with Vector category, KVM is loaded only if CONFIG_ALTIVEC,
1162 * see kvmppc_core_check_processor_compat().
1163 */
1164#ifdef CONFIG_ALTIVEC
1165 case BOOKE_INTERRUPT_ALTIVEC_UNAVAIL:
1166 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_UNAVAIL);
1167 r = RESUME_GUEST;
1168 break;
1169
1170 case BOOKE_INTERRUPT_ALTIVEC_ASSIST:
1171 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ALTIVEC_ASSIST);
1172 r = RESUME_GUEST;
1173 break;
Scott Wood4cd35f672011-06-14 18:34:31 -05001174#endif
Hollis Blanchardbb3a8a12009-01-03 16:23:13 -06001175
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001176 case BOOKE_INTERRUPT_DATA_STORAGE:
Liu Yudaf5e272010-02-02 19:44:35 +08001177 kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
1178 vcpu->arch.fault_esr);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001179 kvmppc_account_exit(vcpu, DSI_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001180 r = RESUME_GUEST;
1181 break;
1182
1183 case BOOKE_INTERRUPT_INST_STORAGE:
Liu Yudaf5e272010-02-02 19:44:35 +08001184 kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001185 kvmppc_account_exit(vcpu, ISI_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001186 r = RESUME_GUEST;
1187 break;
1188
Alexander Graf011da892013-01-31 14:17:38 +01001189 case BOOKE_INTERRUPT_ALIGNMENT:
1190 kvmppc_core_queue_alignment(vcpu, vcpu->arch.fault_dear,
1191 vcpu->arch.fault_esr);
1192 r = RESUME_GUEST;
1193 break;
1194
Scott Woodd30f6e42011-12-20 15:34:43 +00001195#ifdef CONFIG_KVM_BOOKE_HV
1196 case BOOKE_INTERRUPT_HV_SYSCALL:
1197 if (!(vcpu->arch.shared->msr & MSR_PR)) {
1198 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1199 } else {
1200 /*
1201 * hcall from guest userspace -- send privileged
1202 * instruction program check.
1203 */
1204 kvmppc_core_queue_program(vcpu, ESR_PPR);
1205 }
1206
1207 r = RESUME_GUEST;
1208 break;
1209#else
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001210 case BOOKE_INTERRUPT_SYSCALL:
Alexander Graf2a342ed2010-07-29 14:47:48 +02001211 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1212 (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
1213 /* KVM PV hypercalls */
1214 kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
1215 r = RESUME_GUEST;
1216 } else {
1217 /* Guest syscalls */
1218 kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
1219 }
Hollis Blanchard7b701592008-12-02 15:51:58 -06001220 kvmppc_account_exit(vcpu, SYSCALL_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001221 r = RESUME_GUEST;
1222 break;
Scott Woodd30f6e42011-12-20 15:34:43 +00001223#endif
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001224
1225 case BOOKE_INTERRUPT_DTLB_MISS: {
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001226 unsigned long eaddr = vcpu->arch.fault_dear;
Hollis Blanchard7924bd42008-12-02 15:51:55 -06001227 int gtlb_index;
Hollis Blanchard475e7cd2009-01-03 16:23:00 -06001228 gpa_t gpaddr;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001229 gfn_t gfn;
1230
Alexander Grafbf7ca4b2012-02-15 23:40:00 +00001231#ifdef CONFIG_KVM_E500V2
Scott Wooda4cd8b22011-06-14 18:34:41 -05001232 if (!(vcpu->arch.shared->msr & MSR_PR) &&
1233 (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
1234 kvmppc_map_magic(vcpu);
1235 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
1236 r = RESUME_GUEST;
1237
1238 break;
1239 }
1240#endif
1241
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001242 /* Check the guest TLB. */
Hollis Blanchardfa86b8d2009-01-03 16:23:03 -06001243 gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
Hollis Blanchard7924bd42008-12-02 15:51:55 -06001244 if (gtlb_index < 0) {
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001245 /* The guest didn't have a mapping for it. */
Liu Yudaf5e272010-02-02 19:44:35 +08001246 kvmppc_core_queue_dtlb_miss(vcpu,
1247 vcpu->arch.fault_dear,
1248 vcpu->arch.fault_esr);
Hollis Blanchardb52a6382009-01-03 16:23:11 -06001249 kvmppc_mmu_dtlb_miss(vcpu);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001250 kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001251 r = RESUME_GUEST;
1252 break;
1253 }
1254
Scott Woodf1e89022013-06-06 19:16:31 -05001255 idx = srcu_read_lock(&vcpu->kvm->srcu);
1256
Hollis Blanchardbe8d1ca2009-01-03 16:23:02 -06001257 gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
Hollis Blanchard475e7cd2009-01-03 16:23:00 -06001258 gfn = gpaddr >> PAGE_SHIFT;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001259
1260 if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
1261 /* The guest TLB had a mapping, but the shadow TLB
1262 * didn't, and it is RAM. This could be because:
1263 * a) the entry is mapping the host kernel, or
1264 * b) the guest used a large mapping which we're faking
1265 * Either way, we need to satisfy the fault without
1266 * invoking the guest. */
Hollis Blanchard58a96212009-01-03 16:23:01 -06001267 kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001268 kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001269 r = RESUME_GUEST;
1270 } else {
1271 /* Guest has mapped and accessed a page which is not
1272 * actually RAM. */
Hollis Blanchard475e7cd2009-01-03 16:23:00 -06001273 vcpu->arch.paddr_accessed = gpaddr;
Alexander Graf6020c0f2012-03-12 02:26:30 +01001274 vcpu->arch.vaddr_accessed = eaddr;
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001275 r = kvmppc_emulate_mmio(run, vcpu);
Hollis Blanchard7b701592008-12-02 15:51:58 -06001276 kvmppc_account_exit(vcpu, MMIO_EXITS);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001277 }
1278
Scott Woodf1e89022013-06-06 19:16:31 -05001279 srcu_read_unlock(&vcpu->kvm->srcu, idx);
Hollis Blanchardbbf45ba2008-04-16 23:28:09 -05001280 break;
1281 }
1282
	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		idx = srcu_read_lock(&vcpu->kvm->srcu);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		srcu_read_unlock(&vcpu->kvm->srcu, idx);
		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		r = kvmppc_handle_debug(run, vcpu);
		if (r == RESUME_HOST)
			run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

out:
	/*
	 * To avoid clobbering exit_reason, only check for signals if we
	 * aren't already exiting to userspace for some other reason.
	 */
	if (!(r & RESUME_HOST)) {
		s = kvmppc_prepare_to_enter(vcpu);
		if (s <= 0)
			r = (s << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
		else {
			/* interrupts now hard-disabled */
			kvmppc_fix_ee_before_entry();
			kvmppc_load_guest_fp(vcpu);
			kvmppc_load_guest_altivec(vcpu);
		}
	}

	return r;
}

static void kvmppc_set_tsr(struct kvm_vcpu *vcpu, u32 new_tsr)
{
	u32 old_tsr = vcpu->arch.tsr;

	vcpu->arch.tsr = new_tsr;

	if ((old_tsr ^ vcpu->arch.tsr) & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */
int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu)
{
	int i;
	int r;

	vcpu->arch.pc = 0;
	vcpu->arch.shared->pir = vcpu->vcpu_id;
	kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */
	kvmppc_set_msr(vcpu, 0);

#ifndef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_msr = MSR_USER | MSR_IS | MSR_DS;
	vcpu->arch.shadow_pid = 1;
	vcpu->arch.shared->msr = 0;
#endif

	/* Eye-catching numbers so we know if the guest takes an interrupt
	 * before it's programmed its own IVPR/IVORs. */
	vcpu->arch.ivpr = 0x55550000;
	for (i = 0; i < BOOKE_IRQPRIO_MAX; i++)
		vcpu->arch.ivor[i] = 0x7700 | i * 4;

	kvmppc_init_timing_stats(vcpu);

	r = kvmppc_core_vcpu_setup(vcpu);
	kvmppc_sanity_check(vcpu);
	return r;
}

int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu)
{
	/* setup watchdog timer once */
	spin_lock_init(&vcpu->arch.wdt_lock);
	setup_timer(&vcpu->arch.wdt_timer, kvmppc_watchdog_func,
		    (unsigned long)vcpu);

	/*
	 * Clear DBSR.MRR to avoid guest debug interrupt as
	 * this is of host interest
	 */
	mtspr(SPRN_DBSR, DBSR_MRR);
	return 0;
}

void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu)
{
	del_timer_sync(&vcpu->arch.wdt_timer);
}

int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.shared->msr;
	regs->srr0 = kvmppc_get_srr0(vcpu);
	regs->srr1 = kvmppc_get_srr1(vcpu);
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = kvmppc_get_sprg0(vcpu);
	regs->sprg1 = kvmppc_get_sprg1(vcpu);
	regs->sprg2 = kvmppc_get_sprg2(vcpu);
	regs->sprg3 = kvmppc_get_sprg3(vcpu);
	regs->sprg4 = kvmppc_get_sprg4(vcpu);
	regs->sprg5 = kvmppc_get_sprg5(vcpu);
	regs->sprg6 = kvmppc_get_sprg6(vcpu);
	regs->sprg7 = kvmppc_get_sprg7(vcpu);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	return 0;
}

int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu->arch.pc = regs->pc;
	kvmppc_set_cr(vcpu, regs->cr);
	vcpu->arch.ctr = regs->ctr;
	vcpu->arch.lr = regs->lr;
	kvmppc_set_xer(vcpu, regs->xer);
	kvmppc_set_msr(vcpu, regs->msr);
	kvmppc_set_srr0(vcpu, regs->srr0);
	kvmppc_set_srr1(vcpu, regs->srr1);
	kvmppc_set_pid(vcpu, regs->pid);
	kvmppc_set_sprg0(vcpu, regs->sprg0);
	kvmppc_set_sprg1(vcpu, regs->sprg1);
	kvmppc_set_sprg2(vcpu, regs->sprg2);
	kvmppc_set_sprg3(vcpu, regs->sprg3);
	kvmppc_set_sprg4(vcpu, regs->sprg4);
	kvmppc_set_sprg5(vcpu, regs->sprg5);
	kvmppc_set_sprg6(vcpu, regs->sprg6);
	kvmppc_set_sprg7(vcpu, regs->sprg7);

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

	return 0;
}
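
/*
 * Illustrative only (not part of this file): the two handlers above back the
 * generic KVM_GET_REGS/KVM_SET_REGS vcpu ioctls. Assuming an already-created
 * vcpu file descriptor "vcpu_fd", a userspace sketch that advances the guest
 * PC by one instruction might look roughly like:
 *
 *	struct kvm_regs regs;
 *
 *	ioctl(vcpu_fd, KVM_GET_REGS, &regs);
 *	regs.pc += 4;
 *	ioctl(vcpu_fd, KVM_SET_REGS, &regs);
 */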

static void get_sregs_base(struct kvm_vcpu *vcpu,
			   struct kvm_sregs *sregs)
{
	u64 tb = get_tb();

	sregs->u.e.features |= KVM_SREGS_E_BASE;

	sregs->u.e.csrr0 = vcpu->arch.csrr0;
	sregs->u.e.csrr1 = vcpu->arch.csrr1;
	sregs->u.e.mcsr = vcpu->arch.mcsr;
	sregs->u.e.esr = kvmppc_get_esr(vcpu);
	sregs->u.e.dear = kvmppc_get_dar(vcpu);
	sregs->u.e.tsr = vcpu->arch.tsr;
	sregs->u.e.tcr = vcpu->arch.tcr;
	sregs->u.e.dec = kvmppc_get_dec(vcpu, tb);
	sregs->u.e.tb = tb;
	sregs->u.e.vrsave = vcpu->arch.vrsave;
}

static int set_sregs_base(struct kvm_vcpu *vcpu,
			  struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_BASE))
		return 0;

	vcpu->arch.csrr0 = sregs->u.e.csrr0;
	vcpu->arch.csrr1 = sregs->u.e.csrr1;
	vcpu->arch.mcsr = sregs->u.e.mcsr;
	kvmppc_set_esr(vcpu, sregs->u.e.esr);
	kvmppc_set_dar(vcpu, sregs->u.e.dear);
	vcpu->arch.vrsave = sregs->u.e.vrsave;
	kvmppc_set_tcr(vcpu, sregs->u.e.tcr);

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_DEC) {
		vcpu->arch.dec = sregs->u.e.dec;
		kvmppc_emulate_dec(vcpu);
	}

	if (sregs->u.e.update_special & KVM_SREGS_E_UPDATE_TSR)
		kvmppc_set_tsr(vcpu, sregs->u.e.tsr);

	return 0;
}

static void get_sregs_arch206(struct kvm_vcpu *vcpu,
			      struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_ARCH206;

	sregs->u.e.pir = vcpu->vcpu_id;
	sregs->u.e.mcsrr0 = vcpu->arch.mcsrr0;
	sregs->u.e.mcsrr1 = vcpu->arch.mcsrr1;
	sregs->u.e.decar = vcpu->arch.decar;
	sregs->u.e.ivpr = vcpu->arch.ivpr;
}

static int set_sregs_arch206(struct kvm_vcpu *vcpu,
			     struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_ARCH206))
		return 0;

	if (sregs->u.e.pir != vcpu->vcpu_id)
		return -EINVAL;

	vcpu->arch.mcsrr0 = sregs->u.e.mcsrr0;
	vcpu->arch.mcsrr1 = sregs->u.e.mcsrr1;
	vcpu->arch.decar = sregs->u.e.decar;
	vcpu->arch.ivpr = sregs->u.e.ivpr;

	return 0;
}

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	sregs->u.e.features |= KVM_SREGS_E_IVOR;

	sregs->u.e.ivor_low[0] = vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL];
	sregs->u.e.ivor_low[1] = vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK];
	sregs->u.e.ivor_low[2] = vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE];
	sregs->u.e.ivor_low[3] = vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE];
	sregs->u.e.ivor_low[4] = vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL];
	sregs->u.e.ivor_low[5] = vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT];
	sregs->u.e.ivor_low[6] = vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM];
	sregs->u.e.ivor_low[7] = vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL];
	sregs->u.e.ivor_low[8] = vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL];
	sregs->u.e.ivor_low[9] = vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL];
	sregs->u.e.ivor_low[10] = vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER];
	sregs->u.e.ivor_low[11] = vcpu->arch.ivor[BOOKE_IRQPRIO_FIT];
	sregs->u.e.ivor_low[12] = vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG];
	sregs->u.e.ivor_low[13] = vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS];
	sregs->u.e.ivor_low[14] = vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS];
	sregs->u.e.ivor_low[15] = vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG];
	return 0;
}

int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs)
{
	if (!(sregs->u.e.features & KVM_SREGS_E_IVOR))
		return 0;

	vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = sregs->u.e.ivor_low[0];
	vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = sregs->u.e.ivor_low[1];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = sregs->u.e.ivor_low[2];
	vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = sregs->u.e.ivor_low[3];
	vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = sregs->u.e.ivor_low[4];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = sregs->u.e.ivor_low[5];
	vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = sregs->u.e.ivor_low[6];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = sregs->u.e.ivor_low[7];
	vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = sregs->u.e.ivor_low[8];
	vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = sregs->u.e.ivor_low[9];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = sregs->u.e.ivor_low[10];
	vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = sregs->u.e.ivor_low[11];
	vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = sregs->u.e.ivor_low[12];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = sregs->u.e.ivor_low[13];
	vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = sregs->u.e.ivor_low[14];
	vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = sregs->u.e.ivor_low[15];

	return 0;
}

int kvm_arch_vcpu_ioctl_get_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	sregs->pvr = vcpu->arch.pvr;

	get_sregs_base(vcpu, sregs);
	get_sregs_arch206(vcpu, sregs);
	return vcpu->kvm->arch.kvm_ops->get_sregs(vcpu, sregs);
}

int kvm_arch_vcpu_ioctl_set_sregs(struct kvm_vcpu *vcpu,
				  struct kvm_sregs *sregs)
{
	int ret;

	if (vcpu->arch.pvr != sregs->pvr)
		return -EINVAL;

	ret = set_sregs_base(vcpu, sregs);
	if (ret < 0)
		return ret;

	ret = set_sregs_arch206(vcpu, sregs);
	if (ret < 0)
		return ret;

	return vcpu->kvm->arch.kvm_ops->set_sregs(vcpu, sregs);
}
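
/*
 * Illustrative only (not part of this file): the sregs blocks above are
 * exchanged with userspace through the KVM_GET_SREGS/KVM_SET_SREGS vcpu
 * ioctls. Assuming a vcpu file descriptor "vcpu_fd" and a new TCR value
 * "new_tcr", a read-modify-write from userspace might be sketched as:
 *
 *	struct kvm_sregs sregs;
 *
 *	ioctl(vcpu_fd, KVM_GET_SREGS, &sregs);
 *	sregs.u.e.tcr = new_tcr;
 *	ioctl(vcpu_fd, KVM_SET_SREGS, &sregs);
 */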

int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac1);
		break;
	case KVM_REG_PPC_IAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac2);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac3);
		break;
	case KVM_REG_PPC_IAC4:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.iac4);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac1);
		break;
	case KVM_REG_PPC_DAC2:
		*val = get_reg_val(id, vcpu->arch.dbg_reg.dac2);
		break;
	case KVM_REG_PPC_EPR: {
		u32 epr = kvmppc_get_epr(vcpu);
		*val = get_reg_val(id, epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR:
		*val = get_reg_val(id, vcpu->arch.epcr);
		break;
#endif
	case KVM_REG_PPC_TCR:
		*val = get_reg_val(id, vcpu->arch.tcr);
		break;
	case KVM_REG_PPC_TSR:
		*val = get_reg_val(id, vcpu->arch.tsr);
		break;
	case KVM_REG_PPC_DEBUG_INST:
		*val = get_reg_val(id, KVMPPC_INST_SW_BREAKPOINT);
		break;
	case KVM_REG_PPC_VRSAVE:
		*val = get_reg_val(id, vcpu->arch.vrsave);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->get_one_reg(vcpu, id, val);
		break;
	}

	return r;
}

int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id,
		       union kvmppc_one_reg *val)
{
	int r = 0;

	switch (id) {
	case KVM_REG_PPC_IAC1:
		vcpu->arch.dbg_reg.iac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC2:
		vcpu->arch.dbg_reg.iac2 = set_reg_val(id, *val);
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case KVM_REG_PPC_IAC3:
		vcpu->arch.dbg_reg.iac3 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_IAC4:
		vcpu->arch.dbg_reg.iac4 = set_reg_val(id, *val);
		break;
#endif
	case KVM_REG_PPC_DAC1:
		vcpu->arch.dbg_reg.dac1 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_DAC2:
		vcpu->arch.dbg_reg.dac2 = set_reg_val(id, *val);
		break;
	case KVM_REG_PPC_EPR: {
		u32 new_epr = set_reg_val(id, *val);
		kvmppc_set_epr(vcpu, new_epr);
		break;
	}
#if defined(CONFIG_64BIT)
	case KVM_REG_PPC_EPCR: {
		u32 new_epcr = set_reg_val(id, *val);
		kvmppc_set_epcr(vcpu, new_epcr);
		break;
	}
#endif
	case KVM_REG_PPC_OR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_set_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_CLEAR_TSR: {
		u32 tsr_bits = set_reg_val(id, *val);
		kvmppc_clr_tsr_bits(vcpu, tsr_bits);
		break;
	}
	case KVM_REG_PPC_TSR: {
		u32 tsr = set_reg_val(id, *val);
		kvmppc_set_tsr(vcpu, tsr);
		break;
	}
	case KVM_REG_PPC_TCR: {
		u32 tcr = set_reg_val(id, *val);
		kvmppc_set_tcr(vcpu, tcr);
		break;
	}
	case KVM_REG_PPC_VRSAVE:
		vcpu->arch.vrsave = set_reg_val(id, *val);
		break;
	default:
		r = vcpu->kvm->arch.kvm_ops->set_one_reg(vcpu, id, val);
		break;
	}

	return r;
}
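
/*
 * Illustrative only (not part of this file): kvmppc_get_one_reg() and
 * kvmppc_set_one_reg() sit behind the generic KVM_GET_ONE_REG/KVM_SET_ONE_REG
 * vcpu ioctls, which pass a register id plus a userspace pointer to the
 * value. Assuming a vcpu file descriptor "vcpu_fd", setting TSR[ENW]|TSR[WIS]
 * through the OR pseudo-register might be sketched as:
 *
 *	u64 bits = TSR_ENW | TSR_WIS;
 *	struct kvm_one_reg reg = {
 *		.id = KVM_REG_PPC_OR_TSR,
 *		.addr = (uintptr_t)&bits,
 *	};
 *
 *	ioctl(vcpu_fd, KVM_SET_ONE_REG, &reg);
 */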

int kvm_arch_vcpu_ioctl_get_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_set_fpu(struct kvm_vcpu *vcpu, struct kvm_fpu *fpu)
{
	return -ENOTSUPP;
}

int kvm_arch_vcpu_ioctl_translate(struct kvm_vcpu *vcpu,
				  struct kvm_translation *tr)
{
	int r;

	r = kvmppc_core_vcpu_translate(vcpu, tr);
	return r;
}

int kvm_vm_ioctl_get_dirty_log(struct kvm *kvm, struct kvm_dirty_log *log)
{
	return -ENOTSUPP;
}

void kvmppc_core_free_memslot(struct kvm *kvm, struct kvm_memory_slot *free,
			      struct kvm_memory_slot *dont)
{
}

int kvmppc_core_create_memslot(struct kvm *kvm, struct kvm_memory_slot *slot,
			       unsigned long npages)
{
	return 0;
}

int kvmppc_core_prepare_memory_region(struct kvm *kvm,
				      struct kvm_memory_slot *memslot,
				      const struct kvm_userspace_memory_region *mem)
{
	return 0;
}

void kvmppc_core_commit_memory_region(struct kvm *kvm,
				      const struct kvm_userspace_memory_region *mem,
				      const struct kvm_memory_slot *old,
				      const struct kvm_memory_slot *new)
{
}

void kvmppc_core_flush_memslot(struct kvm *kvm, struct kvm_memory_slot *memslot)
{
}

void kvmppc_set_epcr(struct kvm_vcpu *vcpu, u32 new_epcr)
{
#if defined(CONFIG_64BIT)
	vcpu->arch.epcr = new_epcr;
#ifdef CONFIG_KVM_BOOKE_HV
	vcpu->arch.shadow_epcr &= ~SPRN_EPCR_GICM;
	if (vcpu->arch.epcr & SPRN_EPCR_ICM)
		vcpu->arch.shadow_epcr |= SPRN_EPCR_GICM;
#endif
#endif
}

void kvmppc_set_tcr(struct kvm_vcpu *vcpu, u32 new_tcr)
{
	vcpu->arch.tcr = new_tcr;
	arm_next_watchdog(vcpu);
	update_timer_ints(vcpu);
}

void kvmppc_set_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	set_bits(tsr_bits, &vcpu->arch.tsr);
	smp_wmb();
	kvm_make_request(KVM_REQ_PENDING_TIMER, vcpu);
	kvm_vcpu_kick(vcpu);
}

void kvmppc_clr_tsr_bits(struct kvm_vcpu *vcpu, u32 tsr_bits)
{
	clear_bits(tsr_bits, &vcpu->arch.tsr);

	/*
	 * We may have stopped the watchdog due to
	 * being stuck on final expiration.
	 */
	if (tsr_bits & (TSR_ENW | TSR_WIS))
		arm_next_watchdog(vcpu);

	update_timer_ints(vcpu);
}

void kvmppc_decrementer_func(struct kvm_vcpu *vcpu)
{
	if (vcpu->arch.tcr & TCR_ARE) {
		vcpu->arch.dec = vcpu->arch.decar;
		kvmppc_emulate_dec(vcpu);
	}

	kvmppc_set_tsr_bits(vcpu, TSR_DIS);
}
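
/*
 * Background note (editor's summary of the Book E timer facility, not taken
 * from this file): TCR[ARE] is the decrementer auto-reload enable bit. When
 * the guest sets it, each emulated decrementer expiry above reloads DEC from
 * the guest's DECAR value, so the guest gets a periodic tick without
 * rewriting DEC in every interrupt handler; TSR[DIS] is then set to post the
 * decrementer interrupt. A guest wanting such a tick might, roughly:
 *
 *	mtspr(SPRN_DECAR, period);
 *	mtspr(SPRN_TCR, mfspr(SPRN_TCR) | TCR_ARE | TCR_DIE);
 */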

static int kvmppc_booke_add_breakpoint(struct debug_reg *dbg_reg,
				       uint64_t addr, int index)
{
	switch (index) {
	case 0:
		dbg_reg->dbcr0 |= DBCR0_IAC1;
		dbg_reg->iac1 = addr;
		break;
	case 1:
		dbg_reg->dbcr0 |= DBCR0_IAC2;
		dbg_reg->iac2 = addr;
		break;
#if CONFIG_PPC_ADV_DEBUG_IACS > 2
	case 2:
		dbg_reg->dbcr0 |= DBCR0_IAC3;
		dbg_reg->iac3 = addr;
		break;
	case 3:
		dbg_reg->dbcr0 |= DBCR0_IAC4;
		dbg_reg->iac4 = addr;
		break;
#endif
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

static int kvmppc_booke_add_watchpoint(struct debug_reg *dbg_reg, uint64_t addr,
				       int type, int index)
{
	switch (index) {
	case 0:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC1R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC1W;
		dbg_reg->dac1 = addr;
		break;
	case 1:
		if (type & KVMPPC_DEBUG_WATCH_READ)
			dbg_reg->dbcr0 |= DBCR0_DAC2R;
		if (type & KVMPPC_DEBUG_WATCH_WRITE)
			dbg_reg->dbcr0 |= DBCR0_DAC2W;
		dbg_reg->dac2 = addr;
		break;
	default:
		return -EINVAL;
	}

	dbg_reg->dbcr0 |= DBCR0_IDM;
	return 0;
}

void kvm_guest_protect_msr(struct kvm_vcpu *vcpu, ulong prot_bitmap, bool set)
{
	/* XXX: Add similar MSR protection for BookE-PR */
#ifdef CONFIG_KVM_BOOKE_HV
	BUG_ON(prot_bitmap & ~(MSRP_UCLEP | MSRP_DEP | MSRP_PMMP));
	if (set) {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp |= MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp |= MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp |= MSRP_PMMP;
	} else {
		if (prot_bitmap & MSR_UCLE)
			vcpu->arch.shadow_msrp &= ~MSRP_UCLEP;
		if (prot_bitmap & MSR_DE)
			vcpu->arch.shadow_msrp &= ~MSRP_DEP;
		if (prot_bitmap & MSR_PMM)
			vcpu->arch.shadow_msrp &= ~MSRP_PMMP;
	}
#endif
}

int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr, enum xlate_instdata xlid,
		 enum xlate_readwrite xlrw, struct kvmppc_pte *pte)
{
	int gtlb_index;
	gpa_t gpaddr;

#ifdef CONFIG_KVM_E500V2
	if (!(vcpu->arch.shared->msr & MSR_PR) &&
	    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
		pte->eaddr = eaddr;
		pte->raddr = (vcpu->arch.magic_page_pa & PAGE_MASK) |
			     (eaddr & ~PAGE_MASK);
		pte->vpage = eaddr >> PAGE_SHIFT;
		pte->may_read = true;
		pte->may_write = true;
		pte->may_execute = true;

		return 0;
	}
#endif

	/* Check the guest TLB. */
	switch (xlid) {
	case XLATE_INST:
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		break;
	case XLATE_DATA:
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		break;
	default:
		BUG();
	}

	/* Do we have a TLB entry at all? */
	if (gtlb_index < 0)
		return -ENOENT;

	gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);

	pte->eaddr = eaddr;
	pte->raddr = (gpaddr & PAGE_MASK) | (eaddr & ~PAGE_MASK);
	pte->vpage = eaddr >> PAGE_SHIFT;

	/* XXX read permissions from the guest TLB */
	pte->may_read = true;
	pte->may_write = true;
	pte->may_execute = true;

	return 0;
}
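
/*
 * Illustrative only (an assumption about how callers use the helper above,
 * not code taken from this file): translating a guest effective data address
 * before emulating a load might be sketched as:
 *
 *	struct kvmppc_pte pte;
 *
 *	if (kvmppc_xlate(vcpu, eaddr, XLATE_DATA, XLATE_READ, &pte) == 0)
 *		gpaddr = pte.raddr;
 */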

int kvm_arch_vcpu_ioctl_set_guest_debug(struct kvm_vcpu *vcpu,
					struct kvm_guest_debug *dbg)
{
	struct debug_reg *dbg_reg;
	int n, b = 0, w = 0;

	if (!(dbg->control & KVM_GUESTDBG_ENABLE)) {
		vcpu->arch.dbg_reg.dbcr0 = 0;
		vcpu->guest_debug = 0;
		kvm_guest_protect_msr(vcpu, MSR_DE, false);
		return 0;
	}

	kvm_guest_protect_msr(vcpu, MSR_DE, true);
	vcpu->guest_debug = dbg->control;
	vcpu->arch.dbg_reg.dbcr0 = 0;

	if (vcpu->guest_debug & KVM_GUESTDBG_SINGLESTEP)
		vcpu->arch.dbg_reg.dbcr0 |= DBCR0_IDM | DBCR0_IC;

	/* Code below handles only HW breakpoints */
	dbg_reg = &(vcpu->arch.dbg_reg);

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * On BookE-HV (e500mc) the guest is always executed with MSR.GS=1
	 * DBCR1 and DBCR2 are set to trigger debug events when MSR.PR is 0
	 */
	dbg_reg->dbcr1 = 0;
	dbg_reg->dbcr2 = 0;
#else
	/*
	 * On BookE-PR (e500v2) the guest is always executed with MSR.PR=1
	 * We set DBCR1 and DBCR2 to only trigger debug events when MSR.PR
	 * is set.
	 */
	dbg_reg->dbcr1 = DBCR1_IAC1US | DBCR1_IAC2US | DBCR1_IAC3US |
			 DBCR1_IAC4US;
	dbg_reg->dbcr2 = DBCR2_DAC1US | DBCR2_DAC2US;
#endif

	if (!(vcpu->guest_debug & KVM_GUESTDBG_USE_HW_BP))
		return 0;

	for (n = 0; n < (KVMPPC_BOOKE_IAC_NUM + KVMPPC_BOOKE_DAC_NUM); n++) {
		uint64_t addr = dbg->arch.bp[n].addr;
		uint32_t type = dbg->arch.bp[n].type;

		if (type == KVMPPC_DEBUG_NONE)
			continue;

		/* Reject any type bits we don't understand (the original
		 * "type & !(...)" always evaluated to 0, so the check never
		 * fired; the intended mask test uses bitwise ~). */
		if (type & ~(KVMPPC_DEBUG_WATCH_READ |
			     KVMPPC_DEBUG_WATCH_WRITE |
			     KVMPPC_DEBUG_BREAKPOINT))
			return -EINVAL;

		if (type & KVMPPC_DEBUG_BREAKPOINT) {
			/* Setting H/W breakpoint */
			if (kvmppc_booke_add_breakpoint(dbg_reg, addr, b++))
				return -EINVAL;
		} else {
			/* Setting H/W watchpoint */
			if (kvmppc_booke_add_watchpoint(dbg_reg, addr,
							type, w++))
				return -EINVAL;
		}
	}

	return 0;
}
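
/*
 * Illustrative only (not part of this file): userspace drives the handler
 * above through the KVM_SET_GUEST_DEBUG vcpu ioctl. Assuming a vcpu file
 * descriptor "vcpu_fd" and a guest address "guest_ip", arming one hardware
 * instruction breakpoint might be sketched as:
 *
 *	struct kvm_guest_debug dbg = { 0 };
 *
 *	dbg.control = KVM_GUESTDBG_ENABLE | KVM_GUESTDBG_USE_HW_BP;
 *	dbg.arch.bp[0].type = KVMPPC_DEBUG_BREAKPOINT;
 *	dbg.arch.bp[0].addr = guest_ip;
 *	ioctl(vcpu_fd, KVM_SET_GUEST_DEBUG, &dbg);
 */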

void kvmppc_booke_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->cpu = smp_processor_id();
	current->thread.kvm_vcpu = vcpu;
}

void kvmppc_booke_vcpu_put(struct kvm_vcpu *vcpu)
{
	current->thread.kvm_vcpu = NULL;
	vcpu->cpu = -1;

	/* Clear pending debug event in DBSR */
	kvmppc_clear_dbsr();
}

void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->mmu_destroy(vcpu);
}

int kvmppc_core_init_vm(struct kvm *kvm)
{
	return kvm->arch.kvm_ops->init_vm(kvm);
}

struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm, unsigned int id)
{
	return kvm->arch.kvm_ops->vcpu_create(kvm, id);
}

void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_free(vcpu);
}

void kvmppc_core_destroy_vm(struct kvm *kvm)
{
	kvm->arch.kvm_ops->destroy_vm(kvm);
}

void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_load(vcpu, cpu);
}

void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu)
{
	vcpu->kvm->arch.kvm_ops->vcpu_put(vcpu);
}

int __init kvmppc_booke_init(void)
{
#ifndef CONFIG_KVM_BOOKE_HV
	unsigned long ivor[16];
	unsigned long *handler = kvmppc_booke_handler_addr;
	unsigned long max_ivor = 0;
	unsigned long handler_len;
	int i;

	/* We install our own exception handlers by hijacking IVPR. IVPR must
	 * be 16-bit aligned, so we need a 64KB allocation. */
	kvmppc_booke_handlers = __get_free_pages(GFP_KERNEL | __GFP_ZERO,
						 VCPU_SIZE_ORDER);
	if (!kvmppc_booke_handlers)
		return -ENOMEM;

	/* XXX make sure our handlers are smaller than Linux's */

	/* Copy our interrupt handlers to match host IVORs. That way we don't
	 * have to swap the IVORs on every guest/host transition. */
	ivor[0] = mfspr(SPRN_IVOR0);
	ivor[1] = mfspr(SPRN_IVOR1);
	ivor[2] = mfspr(SPRN_IVOR2);
	ivor[3] = mfspr(SPRN_IVOR3);
	ivor[4] = mfspr(SPRN_IVOR4);
	ivor[5] = mfspr(SPRN_IVOR5);
	ivor[6] = mfspr(SPRN_IVOR6);
	ivor[7] = mfspr(SPRN_IVOR7);
	ivor[8] = mfspr(SPRN_IVOR8);
	ivor[9] = mfspr(SPRN_IVOR9);
	ivor[10] = mfspr(SPRN_IVOR10);
	ivor[11] = mfspr(SPRN_IVOR11);
	ivor[12] = mfspr(SPRN_IVOR12);
	ivor[13] = mfspr(SPRN_IVOR13);
	ivor[14] = mfspr(SPRN_IVOR14);
	ivor[15] = mfspr(SPRN_IVOR15);

	for (i = 0; i < 16; i++) {
		if (ivor[i] > max_ivor)
			max_ivor = i;

		handler_len = handler[i + 1] - handler[i];
		memcpy((void *)kvmppc_booke_handlers + ivor[i],
		       (void *)handler[i], handler_len);
	}

	handler_len = handler[max_ivor + 1] - handler[max_ivor];
	flush_icache_range(kvmppc_booke_handlers, kvmppc_booke_handlers +
			   ivor[max_ivor] + handler_len);
#endif /* !BOOKE_HV */
	return 0;
}

void __exit kvmppc_booke_exit(void)
{
	free_pages(kvmppc_booke_handlers, VCPU_SIZE_ORDER);
	kvm_exit();
}