/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			112
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_CIABR	(SFS-16)
#define STACK_SLOT_DAWR		(SFS-24)
#define STACK_SLOT_DAWRX	(SFS-32)
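/*
 * Note: SFS is the size of the frame allocated by kvmppc_hv_entry
 * (stdu r1, -SFS(r1) below).  STACK_SLOT_TRAP holds the exit trap
 * number across calls made during guest exit, and the CIABR/DAWR/DAWRX
 * slots hold the host's values of those SPRs while the guest's values
 * are loaded.
 */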

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI
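/*
 * The RFI above acts as a far jump combined with an MSR change: SRR0
 * holds the address of kvmppc_call_hv_entry and SRR1 holds the MSR with
 * IR and DR cleared, so execution continues below with the MMU off.
 */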

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4
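	/*
	 * The value just written to DEC is the saved host decrementer
	 * expiry (HSTATE_DECEXP) minus the current timebase, since DEC
	 * counts down towards the expiry point.
	 */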

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	15f	/* Invoke the H_DOORBELL handler */
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

15:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0xe80

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
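	/*
	 * The lwarx/stwcx. pair above atomically ORs our thread bit into
	 * napping_threads, retrying if another thread updated the word
	 * between the load-reserve and the store-conditional.
	 */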
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpwi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, STACK_SLOT_TRAP(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	lis	r6, 0x7fff
	ori	r6, r6, 0xffff
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting the guest will fall through this path.
	 * Before proceeding, just check for an HMI interrupt and
	 * invoke the OPAL HMI handler.  By now we are sure that the
	 * primary thread on this core/subcore has already done the
	 * partition switch and TB resync, so we are good to call the
	 * OPAL HMI handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI goes ignored even though the subcores
	 * have already exited the guest.  Hence the HMI keeps waking up
	 * secondaries from nap in a loop, and the secondaries always go
	 * back to nap since no vcore is assigned to them.  This makes it
	 * impossible for the primary thread to get hold of the secondary
	 * threads, resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if an HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
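/*
 * The std/ptesync/ld and the always-equal cmpd above form the standard
 * nap-entry sequence (cf. IDLE_STATE_ENTER_SEQ): the dummy store and
 * reload force outstanding accesses to complete before the nap
 * instruction executes; the bne is never taken since r0 == r0.
 */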

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif
	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r9, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r9
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r9
	bne	21b
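	/*
	 * entry_exit_map packs two thread bitmaps into one word: the
	 * entry map in the low byte and the exit map above it, so any
	 * value >= 0x100 means some thread has already started exiting.
	 */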

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	cmpwi	r6,0
	bne	10f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
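	/*
	 * Each tlbiel with IS = 0b10 invalidates one congruence class
	 * (set) of TLB entries; stepping r7 by 0x1000 advances the set
	 * index, so the loop walks all 128 (POWER7) or 512 (POWER8) sets.
	 */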

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
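	/*
	 * TBU40 writes only the upper 40 bits of the timebase, leaving
	 * the low 24 bits running.  If the low 24 bits wrapped between
	 * the two mftb reads, the carry was lost, so the upper bits are
	 * bumped by one (the addis of 0x100 adds 2^24).
	 */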

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:
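	/*
	 * The lppaca (including the yield count) is big-endian by
	 * definition, so the byte-reversing LWZX_BE/STWX_BE accessors
	 * keep this working on little-endian hosts too.
	 */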

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	cmpwi	r3, 512		/* 1 microsecond */
	blt	hdec_soon
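	/*
	 * 512 timebase ticks is roughly 1 microsecond at the 512MHz
	 * timebase frequency; with less than that left on the hypervisor
	 * decrementer there is no point entering the guest.
	 */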

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
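	/*
	 * LPCR[MER] (mediated external request) makes the hardware
	 * deliver an external interrupt to the guest as soon as the
	 * guest sets MSR[EE]; this is how a pending event is left for a
	 * guest that currently has interrupts disabled.
	 */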
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.
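/*
 * hrfid loads the MSR from HSRR1 and branches to HSRR0, completing the
 * switch into the guest; the branch-to-self after it is never reached
 * and only guards against fall-through.
 */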

secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                              Exit code                                     *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)
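	/*
	 * The first-level handlers add 0x2 to the trap number for
	 * interrupts delivered via HSRR0/1, hence the andi. with 2
	 * above; clrrdi clears those flag bits again to leave the
	 * plain vector number in r12.
	 */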

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to the guest, we check if any CPU is heading
	 * out to the host and, if so, we head out also.  If no CPUs are
	 * heading out, we fall through to the return-value checks
	 * (r3 <= 0) below and re-enter the guest.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *  -1 A guest wakeup IPI (which has now been cleared)
	 *     In either case, we return to guest to deliver any pending
	 *     guest interrupts.
	 *
	 *  -2 A PCI passthrough external interrupt was handled
	 *     (interrupt was delivered directly to guest)
	 *     Return to guest to deliver any pending guest interrupts.
	 */

	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	mr	r3, r12
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)
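	/*
	 * slbmfee/slbmfev read the ESID and VSID halves of each SLB
	 * entry; only valid entries (SLB_ESID_V set) are saved, with the
	 * entry index folded into the ESID word so that slbmte can
	 * recreate the entry on the next guest entry.
	 */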

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_PSPB, r0
	mtspr	SPRN_TCSCR, r0
	mtspr	SPRN_WORT, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
	mtspr	SPRN_UAMOR, r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]), which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
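	/*
	 * The freeze mask above is built arithmetically rather than
	 * spelled out.  A minimal C sketch of the same computation
	 * (hypothetical helper, not part of the build; bit layout as
	 * described in the comment above):
	 *
	 *	#include <stdint.h>
	 *
	 *	// li r3, -1 then clrrdi r3, r3, 10: all ones, then clear
	 *	// the low 10 bits, leaving the top 54 bits set -- the
	 *	// 9 freeze-condition bits for each of the 6 counters.
	 *	static inline uint64_t mmcr2_freeze_mask(void)
	 *	{
	 *		return ~0ULL << 10;	// 0xfffffffffffffc00
	 *	}
	 */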
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_CIABR(r1)
	ld	r6, STACK_SLOT_DAWR(r1)
	ld	r7, STACK_SLOT_DAWRX(r1)
	mtspr	SPRN_CIABR, r5
	mtspr	SPRN_DAWR, r6
	mtspr	SPRN_DAWRX, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync
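
	/*
	 * VCORE_ENTRY_EXIT packs two per-thread maps into one word: the
	 * map of threads that have entered the guest in the low byte and
	 * the map of threads that have exited in the next byte up (field
	 * layout inferred from the extraction above; the helper name is
	 * illustrative, not the kernel's).  A C sketch of the wait
	 * condition:
	 *
	 *	#include <stdbool.h>
	 *	#include <stdint.h>
	 *
	 *	// Primary thread spins until every thread that entered
	 *	// the guest has also exited.
	 *	static bool all_threads_exited(uint32_t entry_exit)
	 *	{
	 *		uint32_t entered = entry_exit & 0xff;		// clrldi ...,56
	 *		uint32_t exited  = (entry_exit >> 8) & 0xff;	// rlwinm ...,32-8,0xff
	 *		return entered == exited;
	 *	}
	 */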

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler has already resynced
	 * the TB, so there is no need to subtract the guest timebase
	 * offset from the timebase; skip that step.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	b	30f

27:
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

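	/*
	 * TBU40 writes only the upper 40 bits of the timebase; the low
	 * 24 bits keep running, so a carry out of them between the mftb
	 * and the mtspr would be lost.  A C sketch of the overflow check
	 * above (illustrative only -- the real update is the mtspr
	 * sequence):
	 *
	 *	#include <stdint.h>
	 *
	 *	static uint64_t new_tb_upper40(uint64_t guest_tb,
	 *				       uint64_t tb_offset,
	 *				       uint64_t tb_after_write)
	 *	{
	 *		uint64_t host_tb = guest_tb - tb_offset;
	 *		// If the low 24 bits wrapped past the sampled
	 *		// value, the running timebase carried into bit 24;
	 *		// fold the carry back in (addis ...,0x100 == +2^24).
	 *		if ((tb_after_write & 0xffffff) < (guest_tb & 0xffffff))
	 *			host_tb += 1u << 24;
	 *		return host_tb;	// upper 40 bits go to SPRN_TBU40
	 *	}
	 */
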
17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, SFS+PPC_LR_STKOFF(r1)
	addi	r1, r1, SFS
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that it has mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
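/*
 * The dispatch below is easier to follow as a decision tree.  A C
 * sketch of the same logic (enum and helper name are illustrative,
 * not the kernel's):
 *
 *	// r3 from kvmppc_hpte_hv_fault selects the outcome:
 *	//   0  -> retry the faulting instruction in the guest
 *	//  -1  -> exit to the host kernel to handle it
 *	//  -2  -> MMIO emulation; fetch the instruction word first
 *	// other -> synthesize a DSI for the guest with that DSISR value
 *	enum hdsi_action { RETRY, EXIT_TO_HOST, EMULATE_MMIO, REFLECT_DSI };
 *
 *	static enum hdsi_action classify_hdsi(long r3)
 *	{
 *		if (r3 == 0)  return RETRY;
 *		if (r3 == -1) return EXIT_TO_HOST;
 *		if (r3 == -2) return EMULATE_MMIO;
 *		return REFLECT_DSI;
 *	}
 */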
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
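	/*
	 * The sequence above is just a bit test in a doubleword bitmap
	 * indexed by hcall number / 4.  A C sketch (the array follows
	 * the asm-offsets symbol above; the helper is illustrative):
	 *
	 *	#include <stdbool.h>
	 *	#include <stdint.h>
	 *
	 *	static bool hcall_enabled(const uint64_t *enabled_hcalls,
	 *				  unsigned int hcall_nr)
	 *	{
	 *		unsigned int bit = hcall_nr / 4;  // hcall numbers are multiples of 4
	 *		return (enabled_hcalls[bit / 64] >> (bit % 64)) & 1;
	 *	}
	 */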
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
	.long	0		/* 0x2fc */
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
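	/*
	 * The lwarx/stwcx. loop above is an atomic "set my bit unless
	 * that would make every thread nap".  A C sketch using a
	 * compare-and-swap loop (names illustrative, not the kernel's):
	 *
	 *	#include <stdatomic.h>
	 *	#include <stdbool.h>
	 *	#include <stdint.h>
	 *
	 *	// Returns false if all threads that entered the guest
	 *	// would now be napping -- the caller must exit instead.
	 *	static bool set_napping_bit(_Atomic uint32_t *napping,
	 *				    uint32_t my_bit, uint32_t entry_map)
	 *	{
	 *		uint32_t old = atomic_load(napping);
	 *		do {
	 *			if ((old | my_bit) == entry_map)
	 *				return false;	// beq kvm_cede_exit
	 *		} while (!atomic_compare_exchange_weak(napping,
	 *						&old, old | my_bit));
	 *		return true;
	 *	}
	 */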
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	ld	r9, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
	cmpw	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	extsw	r3, r3
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

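	/*
	 * DEC counts down, so "now + DEC" is the absolute guest-TB
	 * expiry time; subtracting the vcore's timebase offset converts
	 * it to a host TB value for storage.  A C sketch (the sign
	 * extension mirrors the extsw above; names illustrative):
	 *
	 *	#include <stdint.h>
	 *
	 *	static uint64_t dec_expires_host_tb(int32_t dec,
	 *					    uint64_t now_tb,
	 *					    uint64_t tb_offset)
	 *	{
	 *		return now_tb + (int64_t)dec - tb_offset;
	 *	}
	 *
	 * The reverse conversion (add the offset back, subtract the
	 * current timebase) is done at kvm_end_cede when reloading DEC.
	 */
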
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason.
	 * r4 = VCPU
	 * r3 tells us whether we need to return to host or not.
	 * WARNING: r3 gets checked further down; do not modify it
	 * until that check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest
	 * execution with the current HSRR0 instead of exiting the
	 * guest.  This approach injects the machine check into the
	 * guest for fatal errors, causing the guest to crash.
	 *
	 * The old code used to return to the host for unhandled errors,
	 * which caused the guest to hang with soft lockups inside the
	 * guest and made it difficult to recover the guest instance.
	 *
	 * If we receive a machine check with MSR(RI=0), deliver it to
	 * the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	ld	r10, VCPU_PC(r9)
	cmpdi	r3, 0		/* Did we handle MCE ? */
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0	if nothing needs to be done
 *	1	if something happened that needs to be handled by the host
 *	-1	if there was a guest wakeup (IPI or msgsnd)
 *	-2	if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to host to complete handling the
	 * interrupt.  Trap reason is expected in r12 by guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
kvmppc_save_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
kvmppc_restore_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr			/* TM not active in guest */
	std	r1, HSTATE_HOST_R1(r13)

	/*
	 * Make sure the failure summary is set, otherwise we'll program
	 * check when we trechkpt.  It's possible that this might not have
	 * been set by a kvmppc_set_one_reg() call, but we shouldn't let
	 * that crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	ld	r10, VCPU_XER_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is
 * taken from VCPU_INTR_MSR and is modified based on the required TM state
 * changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2		/* Check if we are in transactional state. */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr
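/*
 * A C sketch of the TS-field edit above (MSR_TS_S_LG/MSR_TS_T_LG as in
 * the kernel headers; the helper name is illustrative).  On interrupt
 * delivery a transaction moves to suspended state; everything else
 * comes from VCPU_INTR_MSR:
 *
 *	static unsigned long msr_on_interrupt(unsigned long guest_msr,
 *					      unsigned long intr_msr)
 *	{
 *		unsigned long ts = (guest_msr >> MSR_TS_S_LG) & 3;
 *		// transactional (0b10) becomes suspended (0b01);
 *		// any other TS value is propagated unchanged
 *		if (ts == 2)
 *			ts = 1;
 *		return (intr_msr & ~(3UL << MSR_TS_S_LG)) |
 *		       (ts << MSR_TS_S_LG);
 *	}
 */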

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr
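/*
 * The lis/ori pair loads PMC6 with 0x7fffffff, one below the point at
 * which a PMC goes negative.  A sketch of the arithmetic (PMCs are
 * 32-bit; the overflow condition is bit 31 becoming set):
 *
 *	#include <stdint.h>
 *
 *	static uint32_t pmc_about_to_overflow(void)
 *	{
 *		// The next counted event increments this to 0x80000000,
 *		// raising the pending performance monitor interrupt that
 *		// the PMAO write should have generated.
 *		return 0x7fffffffu;
 *	}
 */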

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	5f				/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
5:	mftb	r5
	subf	r5, r6, r5
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	cmpwi	r8, 0
	beq	4f				/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)		/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
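/*
 * TAS_SEQCOUNT implements a tiny seqlock: the count is made odd before
 * the total/min/max update and even again after, so a reader that sees
 * an odd count, or a count that changed, retries.  A C sketch of the
 * intended reader side (struct and field names are illustrative):
 *
 *	#include <stdint.h>
 *
 *	struct time_accum { volatile uint64_t seq, total, min, max; };
 *
 *	static uint64_t read_total(const struct time_accum *t)
 *	{
 *		uint64_t s1, s2, total;
 *		do {
 *			s1 = t->seq;		// even when stable
 *			total = t->total;
 *			s2 = t->seq;
 *		} while ((s1 & 1) || s1 != s2);	// writer was mid-update
 *		return total;
 *	}
 */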
#endif