/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright (C) 2010-2011 Freescale Semiconductor, Inc.
 *
 * Author: Varun Sethi <varun.sethi@freescale.com>
 * Author: Scott Wood <scottwood@freescale.com>
 * Author: Mihai Caraman <mihai.caraman@freescale.com>
 *
 * This file is derived from arch/powerpc/kvm/booke_interrupts.S
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/page.h>
#include <asm/asm-compat.h>
#include <asm/asm-offsets.h>
#include <asm/bitsperlong.h>

#ifdef CONFIG_64BIT
#include <asm/exception-64e.h>
#include <asm/hw_irq.h>
#include <asm/irqflags.h>
#else
#include "../kernel/head_booke.h" /* for THREAD_NORMSAVE() */
#endif

#define LONGBYTES		(BITS_PER_LONG / 8)

#define VCPU_GUEST_SPRG(n)	(VCPU_GUEST_SPRGS + (n * LONGBYTES))

/* The host stack layout: */
#define HOST_R1		0 /* Implied by stwu. */
#define HOST_CALLEE_LR	PPC_LR_STKOFF
#define HOST_RUN	(HOST_CALLEE_LR + LONGBYTES)
/*
 * r2 is special: it holds 'current', and it is made nonvolatile in the
 * kernel with the -ffixed-r2 gcc option.
51 */
Mihai Caramane51f8f32012-10-11 06:13:21 +000052#define HOST_R2 (HOST_RUN + LONGBYTES)
53#define HOST_CR (HOST_R2 + LONGBYTES)
54#define HOST_NV_GPRS (HOST_CR + LONGBYTES)
Alexander Graf38df8502012-07-24 13:02:34 +000055#define __HOST_NV_GPR(n) (HOST_NV_GPRS + ((n - 14) * LONGBYTES))
56#define HOST_NV_GPR(n) __HOST_NV_GPR(__REG_##n)
57#define HOST_MIN_STACK_SIZE (HOST_NV_GPR(R31) + LONGBYTES)
Scott Woodd30f6e42011-12-20 15:34:43 +000058#define HOST_STACK_SIZE ((HOST_MIN_STACK_SIZE + 15) & ~15) /* Align. */
Mihai Caramane51f8f32012-10-11 06:13:21 +000059/* LR in caller stack frame. */
60#define HOST_STACK_LR (HOST_STACK_SIZE + PPC_LR_STKOFF)
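/*
 * Note: this frame follows the usual PPC stack convention -- the LR save
 * slot lives in the caller's frame at PPC_LR_STKOFF, and the frame itself
 * is rounded up to a 16-byte boundary.
 */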

#define NEED_EMU		0x00000001 /* emulation -- save nv regs */
#define NEED_DEAR		0x00000002 /* save faulting DEAR */
#define NEED_ESR		0x00000004 /* save faulting ESR */

/*
 * On entry:
 * r4 = vcpu, r5 = srr0, r6 = srr1
 * saved in vcpu: cr, ctr, r3-r13
 */
.macro kvm_handler_common intno, srr0, flags
	/* Restore host stack pointer */
	PPC_STL	r1, VCPU_GPR(R1)(r4)
	PPC_STL	r2, VCPU_GPR(R2)(r4)
	PPC_LL	r1, VCPU_HOST_STACK(r4)
	PPC_LL	r2, HOST_R2(r1)

	mfspr	r10, SPRN_PID
	lwz	r8, VCPU_HOST_PID(r4)
	PPC_LL	r11, VCPU_SHARED(r4)
	PPC_STL	r14, VCPU_GPR(R14)(r4) /* We need a non-volatile GPR. */
	li	r14, \intno

	stw	r10, VCPU_GUEST_PID(r4)
	mtspr	SPRN_PID, r8

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save exit time */
1:	mfspr	r7, SPRN_TBRU
	mfspr	r8, SPRN_TBRL
	mfspr	r9, SPRN_TBRU
	cmpw	r9, r7
	stw	r8, VCPU_TIMING_EXIT_TBL(r4)
	bne-	1b
	stw	r9, VCPU_TIMING_EXIT_TBU(r4)
#endif
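	/*
	 * The TBU/TBL/TBU sequence above rereads the upper timebase and
	 * retries if it changed, so a TBL wrap between the two reads cannot
	 * produce a torn 64-bit timestamp.
	 */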

	oris	r8, r6, MSR_CE@h
	PPC_STD(r6, VCPU_SHARED_MSR, r11)
	ori	r8, r8, MSR_ME | MSR_RI
	PPC_STL	r5, VCPU_PC(r4)

	/*
	 * Make sure CE/ME/RI are set (if appropriate for exception type)
	 * whether or not the guest had it set.  Since mfmsr/mtmsr are
	 * somewhat expensive, skip in the common case where the guest
	 * had all these bits set (and thus they're still set if
	 * appropriate for the exception type).
	 */
	cmpw	r6, r8
	beq	1f
	mfmsr	r7
	.if	\srr0 != SPRN_MCSRR0 && \srr0 != SPRN_CSRR0
	oris	r7, r7, MSR_CE@h
	.endif
	.if	\srr0 != SPRN_MCSRR0
	ori	r7, r7, MSR_ME | MSR_RI
	.endif
	mtmsr	r7
1:

	.if	\flags & NEED_EMU
	PPC_STL	r15, VCPU_GPR(R15)(r4)
	PPC_STL	r16, VCPU_GPR(R16)(r4)
	PPC_STL	r17, VCPU_GPR(R17)(r4)
	PPC_STL	r18, VCPU_GPR(R18)(r4)
	PPC_STL	r19, VCPU_GPR(R19)(r4)
	PPC_STL	r20, VCPU_GPR(R20)(r4)
	PPC_STL	r21, VCPU_GPR(R21)(r4)
	PPC_STL	r22, VCPU_GPR(R22)(r4)
	PPC_STL	r23, VCPU_GPR(R23)(r4)
	PPC_STL	r24, VCPU_GPR(R24)(r4)
	PPC_STL	r25, VCPU_GPR(R25)(r4)
	PPC_STL	r26, VCPU_GPR(R26)(r4)
	PPC_STL	r27, VCPU_GPR(R27)(r4)
	PPC_STL	r28, VCPU_GPR(R28)(r4)
	PPC_STL	r29, VCPU_GPR(R29)(r4)
	PPC_STL	r30, VCPU_GPR(R30)(r4)
	PPC_STL	r31, VCPU_GPR(R31)(r4)

	/*
	 * We don't use external PID support. lwepx faults would need to be
	 * handled by KVM and this implies additional code in DO_KVM (for
	 * DTB_MISS, DSI and LRAT) to check ESR[EPID] and EPLC[EGS], which
	 * is too intrusive for the host. Get the last instruction in
	 * kvmppc_get_last_inst().
	 */
	li	r9, KVM_INST_FETCH_FAILED
	stw	r9, VCPU_LAST_INST(r4)
	.endif

	.if	\flags & NEED_ESR
	mfspr	r8, SPRN_ESR
	PPC_STL	r8, VCPU_FAULT_ESR(r4)
	.endif

	.if	\flags & NEED_DEAR
	mfspr	r9, SPRN_DEAR
	PPC_STL	r9, VCPU_FAULT_DEAR(r4)
	.endif

	b	kvmppc_resume_host
.endm
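/*
 * Summary: kvm_handler_common switches back to the host stack and host PID,
 * saves whatever extra guest state the \flags bits ask for (nonvolatile GPRs
 * for emulation, DEAR, ESR), and branches to kvmppc_resume_host.
 */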

#ifdef CONFIG_64BIT
/* Exception types */
#define EX_GEN			1
#define EX_GDBELL		2
#define EX_DBG			3
#define EX_MC			4
#define EX_CRIT			5
#define EX_TLB			6

/*
 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
 */
.macro kvm_handler intno type scratch, paca_ex, ex_r10, ex_r11, srr0, srr1, flags
 _GLOBAL(kvmppc_handler_\intno\()_\srr1)
	mr	r11, r4
	/*
	 * Get vcpu from Paca: paca->__current.thread->kvm_vcpu
	 */
	PPC_LL	r4, PACACURRENT(r13)
	PPC_LL	r4, (THREAD + THREAD_KVM_VCPU)(r4)
	stw	r10, VCPU_CR(r4)
	PPC_STL	r11, VCPU_GPR(R4)(r4)
	PPC_STL	r5, VCPU_GPR(R5)(r4)
	PPC_STL	r6, VCPU_GPR(R6)(r4)
	PPC_STL	r8, VCPU_GPR(R8)(r4)
	PPC_STL	r9, VCPU_GPR(R9)(r4)
	.if \type == EX_TLB
	PPC_LL	r5, EX_TLB_R13(r12)
	PPC_LL	r6, EX_TLB_R10(r12)
	PPC_LL	r8, EX_TLB_R11(r12)
	mfspr	r12, \scratch
	.else
	mfspr	r5, \scratch
	PPC_LL	r6, (\paca_ex + \ex_r10)(r13)
	PPC_LL	r8, (\paca_ex + \ex_r11)(r13)
	.endif
	PPC_STL	r5, VCPU_GPR(R13)(r4)
	PPC_STL	r3, VCPU_GPR(R3)(r4)
	PPC_STL	r7, VCPU_GPR(R7)(r4)
	PPC_STL	r12, VCPU_GPR(R12)(r4)
	PPC_STL	r6, VCPU_GPR(R10)(r4)
	PPC_STL	r8, VCPU_GPR(R11)(r4)
	mfctr	r5
	PPC_STL	r5, VCPU_CTR(r4)
	mfspr	r5, \srr0
	mfspr	r6, \srr1
	kvm_handler_common \intno, \srr0, \flags
.endm
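/*
 * The two reload paths above differ in where the exception prolog stashed
 * the guest registers: bolted TLB miss handlers leave guest r10/r11/r13 in
 * the EX_TLB save area addressed by r12 (with the original r12 in \scratch),
 * while the other handlers leave guest r13 in \scratch and guest r10/r11 in
 * the per-type PACA exception save area.
 */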

#define EX_PARAMS(type) \
	EX_##type, \
	SPRN_SPRG_##type##_SCRATCH, \
	PACA_EX##type, \
	EX_R10, \
	EX_R11

#define EX_PARAMS_TLB \
	EX_TLB, \
	SPRN_SPRG_GEN_SCRATCH, \
	PACA_EXTLB, \
	EX_TLB_R10, \
	EX_TLB_R11
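/*
 * EX_PARAMS(type) expands to the first five kvm_handler arguments for a
 * given exception type: its EX_* tag, the matching scratch SPRG, and the
 * PACA exception save area plus r10/r11 slot offsets. EX_PARAMS_TLB is the
 * TLB-miss variant, which uses the GEN scratch SPRG and the EXTLB area.
 */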

kvm_handler BOOKE_INTERRUPT_CRITICAL, EX_PARAMS(CRIT), \
	SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_MACHINE_CHECK, EX_PARAMS(MC), \
	SPRN_MCSRR0, SPRN_MCSRR1, 0
kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_INST_STORAGE, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DECREMENTER, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_FIT, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_WATCHDOG, EX_PARAMS(CRIT), \
	SPRN_CSRR0, SPRN_CSRR1, 0
/*
 * Only bolted TLB miss exception handlers are supported for now
 */
kvm_handler BOOKE_INTERRUPT_DTLB_MISS, EX_PARAMS_TLB, \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, EX_PARAMS_TLB, \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, EX_PARAMS(CRIT), \
	SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_HV_PRIV, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, NEED_EMU
kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, EX_PARAMS(GDBELL), \
	SPRN_GSRR0, SPRN_GSRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, EX_PARAMS(CRIT), \
	SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(DBG), \
	SPRN_DSRR0, SPRN_DSRR1, 0
kvm_handler BOOKE_INTERRUPT_DEBUG, EX_PARAMS(CRIT), \
	SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_LRAT_ERROR, EX_PARAMS(GEN), \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
#else
/*
 * For input register values, see arch/powerpc/include/asm/kvm_booke_hv_asm.h
 */
.macro kvm_handler intno srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
	PPC_LL	r11, THREAD_KVM_VCPU(r10)
	PPC_STL	r3, VCPU_GPR(R3)(r11)
	mfspr	r3, SPRN_SPRG_RSCRATCH0
	PPC_STL	r4, VCPU_GPR(R4)(r11)
	PPC_LL	r4, THREAD_NORMSAVE(0)(r10)
	PPC_STL	r5, VCPU_GPR(R5)(r11)
	stw	r13, VCPU_CR(r11)
	mfspr	r5, \srr0
	PPC_STL	r3, VCPU_GPR(R10)(r11)
	PPC_LL	r3, THREAD_NORMSAVE(2)(r10)
	PPC_STL	r6, VCPU_GPR(R6)(r11)
	PPC_STL	r4, VCPU_GPR(R11)(r11)
	mfspr	r6, \srr1
	PPC_STL	r7, VCPU_GPR(R7)(r11)
	PPC_STL	r8, VCPU_GPR(R8)(r11)
	PPC_STL	r9, VCPU_GPR(R9)(r11)
	PPC_STL	r3, VCPU_GPR(R13)(r11)
	mfctr	r7
	PPC_STL	r12, VCPU_GPR(R12)(r11)
	PPC_STL	r7, VCPU_CTR(r11)
	mr	r4, r11
	kvm_handler_common \intno, \srr0, \flags
.endm
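/*
 * In this 32-bit variant the exception prolog has already stashed guest r10
 * in SPRN_SPRG_RSCRATCH0, guest r11/r13 in THREAD_NORMSAVE(0)/(2), and the
 * guest CR in r13, with r10 pointing at the thread struct (see the DO_KVM
 * hook in kvm_booke_hv_asm.h); the loads above move those into the vcpu.
 */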

.macro kvm_lvl_handler intno scratch srr0, srr1, flags
_GLOBAL(kvmppc_handler_\intno\()_\srr1)
	mfspr	r10, SPRN_SPRG_THREAD
	PPC_LL	r11, THREAD_KVM_VCPU(r10)
	PPC_STL	r3, VCPU_GPR(R3)(r11)
	mfspr	r3, \scratch
	PPC_STL	r4, VCPU_GPR(R4)(r11)
	PPC_LL	r4, GPR9(r8)
	PPC_STL	r5, VCPU_GPR(R5)(r11)
	stw	r9, VCPU_CR(r11)
	mfspr	r5, \srr0
	PPC_STL	r3, VCPU_GPR(R8)(r11)
	PPC_LL	r3, GPR10(r8)
	PPC_STL	r6, VCPU_GPR(R6)(r11)
	PPC_STL	r4, VCPU_GPR(R9)(r11)
	mfspr	r6, \srr1
	PPC_LL	r4, GPR11(r8)
	PPC_STL	r7, VCPU_GPR(R7)(r11)
	PPC_STL	r3, VCPU_GPR(R10)(r11)
	mfctr	r7
	PPC_STL	r12, VCPU_GPR(R12)(r11)
	PPC_STL	r13, VCPU_GPR(R13)(r11)
	PPC_STL	r4, VCPU_GPR(R11)(r11)
	PPC_STL	r7, VCPU_CTR(r11)
	mr	r4, r11
	kvm_handler_common \intno, \srr0, \flags
.endm
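/*
 * kvm_lvl_handler is the variant for level exceptions (critical, machine
 * check, debug): the prolog left guest r8 in the level-specific scratch
 * SPRG, guest r9-r11 in the exception frame addressed by r8, and the guest
 * CR in r9, so they are recovered from there instead of THREAD_NORMSAVE.
 */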

kvm_lvl_handler BOOKE_INTERRUPT_CRITICAL, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_MACHINE_CHECK, \
	SPRN_SPRG_RSCRATCH_MC, SPRN_MCSRR0, SPRN_MCSRR1, 0
kvm_handler BOOKE_INTERRUPT_DATA_STORAGE, \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_INST_STORAGE, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_EXTERNAL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_ALIGNMENT, \
	SPRN_SRR0, SPRN_SRR1, (NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_PROGRAM, SPRN_SRR0, SPRN_SRR1, NEED_ESR
kvm_handler BOOKE_INTERRUPT_FP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_AP_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DECREMENTER, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_FIT, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_WATCHDOG, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_DTLB_MISS, \
	SPRN_SRR0, SPRN_SRR1, (NEED_EMU | NEED_DEAR | NEED_ESR)
kvm_handler BOOKE_INTERRUPT_ITLB_MISS, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_UNAVAIL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_DATA, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_SPE_FP_ROUND, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_PERFORMANCE_MONITOR, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_DOORBELL, SPRN_SRR0, SPRN_SRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DOORBELL_CRITICAL, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_handler BOOKE_INTERRUPT_HV_PRIV, SPRN_SRR0, SPRN_SRR1, NEED_EMU
kvm_handler BOOKE_INTERRUPT_HV_SYSCALL, SPRN_SRR0, SPRN_SRR1, 0
kvm_handler BOOKE_INTERRUPT_GUEST_DBELL, SPRN_GSRR0, SPRN_GSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_GUEST_DBELL_CRIT, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
	SPRN_SPRG_RSCRATCH_CRIT, SPRN_CSRR0, SPRN_CSRR1, 0
kvm_lvl_handler BOOKE_INTERRUPT_DEBUG, \
	SPRN_SPRG_RSCRATCH_DBG, SPRN_DSRR0, SPRN_DSRR1, 0
#endif

/* Registers:
 * SPRG_SCRATCH0: guest r10
 * r4: vcpu pointer
 * r11: vcpu->arch.shared
 * r14: KVM exit number
 */
_GLOBAL(kvmppc_resume_host)
	/* Save remaining volatile guest register state to vcpu. */
	mfspr	r3, SPRN_VRSAVE
	PPC_STL	r0, VCPU_GPR(R0)(r4)
	mflr	r5
	mfspr	r6, SPRN_SPRG4
	PPC_STL	r5, VCPU_LR(r4)
	mfspr	r7, SPRN_SPRG5
	stw	r3, VCPU_VRSAVE(r4)
#ifdef CONFIG_64BIT
	PPC_LL	r3, PACA_SPRG_VDSO(r13)
#endif
	mfspr	r5, SPRN_SPRG9
	PPC_STD(r6, VCPU_SHARED_SPRG4, r11)
	mfspr	r8, SPRN_SPRG6
	PPC_STD(r7, VCPU_SHARED_SPRG5, r11)
	mfspr	r9, SPRN_SPRG7
#ifdef CONFIG_64BIT
	mtspr	SPRN_SPRG_VDSO_WRITE, r3
#endif
	PPC_STD(r5, VCPU_SPRG9, r4)
	PPC_STD(r8, VCPU_SHARED_SPRG6, r11)
	mfxer	r3
	PPC_STD(r9, VCPU_SHARED_SPRG7, r11)

	/* save guest MAS registers and restore host mas4 & mas6 */
	mfspr	r5, SPRN_MAS0
	PPC_STL	r3, VCPU_XER(r4)
	mfspr	r6, SPRN_MAS1
	stw	r5, VCPU_SHARED_MAS0(r11)
	mfspr	r7, SPRN_MAS2
	stw	r6, VCPU_SHARED_MAS1(r11)
	PPC_STD(r7, VCPU_SHARED_MAS2, r11)
	mfspr	r5, SPRN_MAS3
	mfspr	r6, SPRN_MAS4
	stw	r5, VCPU_SHARED_MAS7_3+4(r11)
	mfspr	r7, SPRN_MAS6
	stw	r6, VCPU_SHARED_MAS4(r11)
	mfspr	r5, SPRN_MAS7
	lwz	r6, VCPU_HOST_MAS4(r4)
	stw	r7, VCPU_SHARED_MAS6(r11)
	lwz	r8, VCPU_HOST_MAS6(r4)
	mtspr	SPRN_MAS4, r6
	stw	r5, VCPU_SHARED_MAS7_3+0(r11)
	mtspr	SPRN_MAS6, r8
	/* Enable MAS register updates via exception */
	mfspr	r3, SPRN_EPCR
	rlwinm	r3, r3, 0, ~SPRN_EPCR_DMIUH
	mtspr	SPRN_EPCR, r3
	isync

#ifdef CONFIG_64BIT
	/*
	 * We enter with interrupts disabled in hardware, but
	 * we need to call RECONCILE_IRQ_STATE to ensure
	 * that the software state is kept in sync.
	 */
	RECONCILE_IRQ_STATE(r3,r5)
#endif

	/* Switch to kernel stack and jump to handler. */
	PPC_LL	r3, HOST_RUN(r1)
	mr	r5, r14 /* intno */
	mr	r14, r4 /* Save vcpu pointer. */
	bl	kvmppc_handle_exit
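	/*
	 * kvmppc_handle_exit() returns a RESUME_* code in r3; the
	 * RESUME_FLAG_NV and RESUME_FLAG_HOST tests below decide whether to
	 * reload the guest nonvolatiles and whether to re-enter the guest or
	 * unwind back to the host.
	 */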

	/* Restore vcpu pointer and the nonvolatiles we used. */
	mr	r4, r14
	PPC_LL	r14, VCPU_GPR(R14)(r4)

	andi.	r5, r3, RESUME_FLAG_NV
	beq	skip_nv_load
	PPC_LL	r15, VCPU_GPR(R15)(r4)
	PPC_LL	r16, VCPU_GPR(R16)(r4)
	PPC_LL	r17, VCPU_GPR(R17)(r4)
	PPC_LL	r18, VCPU_GPR(R18)(r4)
	PPC_LL	r19, VCPU_GPR(R19)(r4)
	PPC_LL	r20, VCPU_GPR(R20)(r4)
	PPC_LL	r21, VCPU_GPR(R21)(r4)
	PPC_LL	r22, VCPU_GPR(R22)(r4)
	PPC_LL	r23, VCPU_GPR(R23)(r4)
	PPC_LL	r24, VCPU_GPR(R24)(r4)
	PPC_LL	r25, VCPU_GPR(R25)(r4)
	PPC_LL	r26, VCPU_GPR(R26)(r4)
	PPC_LL	r27, VCPU_GPR(R27)(r4)
	PPC_LL	r28, VCPU_GPR(R28)(r4)
	PPC_LL	r29, VCPU_GPR(R29)(r4)
	PPC_LL	r30, VCPU_GPR(R30)(r4)
	PPC_LL	r31, VCPU_GPR(R31)(r4)
skip_nv_load:
	/* Should we return to the guest? */
	andi.	r5, r3, RESUME_FLAG_HOST
	beq	lightweight_exit

	srawi	r3, r3, 2 /* Shift -ERR back down. */

heavyweight_exit:
	/* Not returning to guest. */
	PPC_LL	r5, HOST_STACK_LR(r1)
	lwz	r6, HOST_CR(r1)

	/*
	 * We already saved guest volatile register state; now save the
	 * non-volatiles.
	 */

	PPC_STL	r15, VCPU_GPR(R15)(r4)
	PPC_STL	r16, VCPU_GPR(R16)(r4)
	PPC_STL	r17, VCPU_GPR(R17)(r4)
	PPC_STL	r18, VCPU_GPR(R18)(r4)
	PPC_STL	r19, VCPU_GPR(R19)(r4)
	PPC_STL	r20, VCPU_GPR(R20)(r4)
	PPC_STL	r21, VCPU_GPR(R21)(r4)
	PPC_STL	r22, VCPU_GPR(R22)(r4)
	PPC_STL	r23, VCPU_GPR(R23)(r4)
	PPC_STL	r24, VCPU_GPR(R24)(r4)
	PPC_STL	r25, VCPU_GPR(R25)(r4)
	PPC_STL	r26, VCPU_GPR(R26)(r4)
	PPC_STL	r27, VCPU_GPR(R27)(r4)
	PPC_STL	r28, VCPU_GPR(R28)(r4)
	PPC_STL	r29, VCPU_GPR(R29)(r4)
	PPC_STL	r30, VCPU_GPR(R30)(r4)
	PPC_STL	r31, VCPU_GPR(R31)(r4)

	/* Load host non-volatile register state from host stack. */
	PPC_LL	r14, HOST_NV_GPR(R14)(r1)
	PPC_LL	r15, HOST_NV_GPR(R15)(r1)
	PPC_LL	r16, HOST_NV_GPR(R16)(r1)
	PPC_LL	r17, HOST_NV_GPR(R17)(r1)
	PPC_LL	r18, HOST_NV_GPR(R18)(r1)
	PPC_LL	r19, HOST_NV_GPR(R19)(r1)
	PPC_LL	r20, HOST_NV_GPR(R20)(r1)
	PPC_LL	r21, HOST_NV_GPR(R21)(r1)
	PPC_LL	r22, HOST_NV_GPR(R22)(r1)
	PPC_LL	r23, HOST_NV_GPR(R23)(r1)
	PPC_LL	r24, HOST_NV_GPR(R24)(r1)
	PPC_LL	r25, HOST_NV_GPR(R25)(r1)
	PPC_LL	r26, HOST_NV_GPR(R26)(r1)
	PPC_LL	r27, HOST_NV_GPR(R27)(r1)
	PPC_LL	r28, HOST_NV_GPR(R28)(r1)
	PPC_LL	r29, HOST_NV_GPR(R29)(r1)
	PPC_LL	r30, HOST_NV_GPR(R30)(r1)
	PPC_LL	r31, HOST_NV_GPR(R31)(r1)

	/* Return to kvm_vcpu_run(). */
	mtlr	r5
	mtcr	r6
	addi	r1, r1, HOST_STACK_SIZE
	/* r3 still contains the return code from kvmppc_handle_exit(). */
	blr

/* Registers:
 * r3: kvm_run pointer
 * r4: vcpu pointer
 */
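/*
 * __kvmppc_vcpu_run builds the host stack frame laid out by the HOST_*
 * defines above, saves the host nonvolatile state into it, loads the guest
 * nonvolatiles, and falls through to lightweight_exit to enter the guest.
 */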
_GLOBAL(__kvmppc_vcpu_run)
	stwu	r1, -HOST_STACK_SIZE(r1)
	PPC_STL	r1, VCPU_HOST_STACK(r4)	/* Save stack pointer to vcpu. */

	/* Save host state to stack. */
	PPC_STL	r3, HOST_RUN(r1)
	mflr	r3
	mfcr	r5
	PPC_STL	r3, HOST_STACK_LR(r1)

	stw	r5, HOST_CR(r1)

	/* Save host non-volatile register state to stack. */
	PPC_STL	r14, HOST_NV_GPR(R14)(r1)
	PPC_STL	r15, HOST_NV_GPR(R15)(r1)
	PPC_STL	r16, HOST_NV_GPR(R16)(r1)
	PPC_STL	r17, HOST_NV_GPR(R17)(r1)
	PPC_STL	r18, HOST_NV_GPR(R18)(r1)
	PPC_STL	r19, HOST_NV_GPR(R19)(r1)
	PPC_STL	r20, HOST_NV_GPR(R20)(r1)
	PPC_STL	r21, HOST_NV_GPR(R21)(r1)
	PPC_STL	r22, HOST_NV_GPR(R22)(r1)
	PPC_STL	r23, HOST_NV_GPR(R23)(r1)
	PPC_STL	r24, HOST_NV_GPR(R24)(r1)
	PPC_STL	r25, HOST_NV_GPR(R25)(r1)
	PPC_STL	r26, HOST_NV_GPR(R26)(r1)
	PPC_STL	r27, HOST_NV_GPR(R27)(r1)
	PPC_STL	r28, HOST_NV_GPR(R28)(r1)
	PPC_STL	r29, HOST_NV_GPR(R29)(r1)
	PPC_STL	r30, HOST_NV_GPR(R30)(r1)
	PPC_STL	r31, HOST_NV_GPR(R31)(r1)

	/* Load guest non-volatiles. */
	PPC_LL	r14, VCPU_GPR(R14)(r4)
	PPC_LL	r15, VCPU_GPR(R15)(r4)
	PPC_LL	r16, VCPU_GPR(R16)(r4)
	PPC_LL	r17, VCPU_GPR(R17)(r4)
	PPC_LL	r18, VCPU_GPR(R18)(r4)
	PPC_LL	r19, VCPU_GPR(R19)(r4)
	PPC_LL	r20, VCPU_GPR(R20)(r4)
	PPC_LL	r21, VCPU_GPR(R21)(r4)
	PPC_LL	r22, VCPU_GPR(R22)(r4)
	PPC_LL	r23, VCPU_GPR(R23)(r4)
	PPC_LL	r24, VCPU_GPR(R24)(r4)
	PPC_LL	r25, VCPU_GPR(R25)(r4)
	PPC_LL	r26, VCPU_GPR(R26)(r4)
	PPC_LL	r27, VCPU_GPR(R27)(r4)
	PPC_LL	r28, VCPU_GPR(R28)(r4)
	PPC_LL	r29, VCPU_GPR(R29)(r4)
	PPC_LL	r30, VCPU_GPR(R30)(r4)
	PPC_LL	r31, VCPU_GPR(R31)(r4)


lightweight_exit:
	PPC_STL	r2, HOST_R2(r1)

	mfspr	r3, SPRN_PID
	stw	r3, VCPU_HOST_PID(r4)
	lwz	r3, VCPU_GUEST_PID(r4)
	mtspr	SPRN_PID, r3

	PPC_LL	r11, VCPU_SHARED(r4)
	/* Disable MAS register updates via exception */
	mfspr	r3, SPRN_EPCR
	oris	r3, r3, SPRN_EPCR_DMIUH@h
	mtspr	SPRN_EPCR, r3
	isync
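	/*
	 * With EPCR[DMIUH] set, exceptions directed to the hypervisor no
	 * longer update the MAS registers, so host-handled faults do not
	 * clobber the guest MAS values loaded below; kvmppc_resume_host
	 * clears the bit again once those values have been saved.
	 */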
	/* Save host mas4 and mas6 and load guest MAS registers */
	mfspr	r3, SPRN_MAS4
	stw	r3, VCPU_HOST_MAS4(r4)
	mfspr	r3, SPRN_MAS6
	stw	r3, VCPU_HOST_MAS6(r4)
	lwz	r3, VCPU_SHARED_MAS0(r11)
	lwz	r5, VCPU_SHARED_MAS1(r11)
	PPC_LD(r6, VCPU_SHARED_MAS2, r11)
	lwz	r7, VCPU_SHARED_MAS7_3+4(r11)
	lwz	r8, VCPU_SHARED_MAS4(r11)
	mtspr	SPRN_MAS0, r3
	mtspr	SPRN_MAS1, r5
	mtspr	SPRN_MAS2, r6
	mtspr	SPRN_MAS3, r7
	mtspr	SPRN_MAS4, r8
	lwz	r3, VCPU_SHARED_MAS6(r11)
	lwz	r5, VCPU_SHARED_MAS7_3+0(r11)
	mtspr	SPRN_MAS6, r3
	mtspr	SPRN_MAS7, r5

	/*
	 * Host interrupt handlers may have clobbered these guest-readable
	 * SPRGs, so we need to reload them here with the guest's values.
	 */
	lwz	r3, VCPU_VRSAVE(r4)
	PPC_LD(r5, VCPU_SHARED_SPRG4, r11)
	mtspr	SPRN_VRSAVE, r3
	PPC_LD(r6, VCPU_SHARED_SPRG5, r11)
	mtspr	SPRN_SPRG4W, r5
	PPC_LD(r7, VCPU_SHARED_SPRG6, r11)
	mtspr	SPRN_SPRG5W, r6
	PPC_LD(r8, VCPU_SHARED_SPRG7, r11)
	mtspr	SPRN_SPRG6W, r7
	PPC_LD(r5, VCPU_SPRG9, r4)
	mtspr	SPRN_SPRG7W, r8
	mtspr	SPRN_SPRG9, r5

	/* Load some guest volatiles. */
	PPC_LL	r3, VCPU_LR(r4)
	PPC_LL	r5, VCPU_XER(r4)
	PPC_LL	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_CR(r4)
	PPC_LL	r8, VCPU_PC(r4)
	PPC_LD(r9, VCPU_SHARED_MSR, r11)
	PPC_LL	r0, VCPU_GPR(R0)(r4)
	PPC_LL	r1, VCPU_GPR(R1)(r4)
	PPC_LL	r2, VCPU_GPR(R2)(r4)
	PPC_LL	r10, VCPU_GPR(R10)(r4)
	PPC_LL	r11, VCPU_GPR(R11)(r4)
	PPC_LL	r12, VCPU_GPR(R12)(r4)
	PPC_LL	r13, VCPU_GPR(R13)(r4)
	mtlr	r3
	mtxer	r5
	mtctr	r6
	mtsrr0	r8
	mtsrr1	r9
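	/*
	 * SRR0/SRR1 now hold the guest PC and the guest MSR from the shared
	 * area; the rfi at the end of this path uses them to resume the
	 * guest.
	 */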

#ifdef CONFIG_KVM_EXIT_TIMING
	/* save enter time */
1:
	mfspr	r6, SPRN_TBRU
	mfspr	r9, SPRN_TBRL
	mfspr	r8, SPRN_TBRU
	cmpw	r8, r6
	stw	r9, VCPU_TIMING_LAST_ENTER_TBL(r4)
	bne	1b
	stw	r8, VCPU_TIMING_LAST_ENTER_TBU(r4)
#endif

	/*
	 * Don't execute any instruction that can change CR after
	 * the mtcr below.
	 */
	mtcr	r7

	/* Finish loading guest volatiles and jump to guest. */
	PPC_LL	r5, VCPU_GPR(R5)(r4)
	PPC_LL	r6, VCPU_GPR(R6)(r4)
	PPC_LL	r7, VCPU_GPR(R7)(r4)
	PPC_LL	r8, VCPU_GPR(R8)(r4)
	PPC_LL	r9, VCPU_GPR(R9)(r4)

	PPC_LL	r3, VCPU_GPR(R3)(r4)
	PPC_LL	r4, VCPU_GPR(R4)(r4)
	rfi