/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI
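/*
 * Editorial note (not in the original source): the trampoline above is
 * roughly this C-like sketch, using RFI with a doctored SRR1 to turn
 * translation off so that kvmppc_call_hv_entry runs in real mode:
 *
 *	msr = mfmsr();
 *	mtmsrd(msr & ~MSR_RI, 1);		// interrupts now unrecoverable
 *	SRR0 = kvmppc_call_hv_entry;		// where RFI will jump to
 *	SRR1 = msr & ~(MSR_IR | MSR_DR);	// MMU off => real mode
 *	rfi();
 */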

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	15f	/* Invoke the H_DOORBELL handler */
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

15:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0xe80

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
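	/*
	 * Illustrative sketch (not from the source): the lwarx/stwcx. loop
	 * above is an atomic OR on the 32-bit napping_threads field:
	 *
	 *	do {
	 *		old = load_reserve(&vc->napping_threads);
	 *	} while (!store_conditional(&vc->napping_threads,
	 *				    old | (1 << ptid)));
	 *
	 * where load_reserve/store_conditional stand in for lwarx/stwcx.
	 */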
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpwi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, 112-4(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, 112-4(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	lis	r6, 0x7fff
	ori	r6, r6, 0xffff
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting the guest will fall through this path.
	 * Before proceeding, check for an HMI interrupt and, if one is
	 * pending, invoke the opal HMI handler.  By now we are sure that
	 * the primary thread on this core/subcore has already done the
	 * partition switch/TB resync, so we are good to call the opal
	 * HMI handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	b	pnv_wakeup_loss
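	/*
	 * Editorial note: rlwimi inserts the source register into the
	 * destination under the given mask, so the LPCR update above is
	 * approximately
	 *	lpcr = (lpcr & ~(LPCR_PECE0 | LPCR_PECE1)) | LPCR_PECE0;
	 * i.e. only external interrupts can wake this thread from nap.
	 */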

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI goes ignored even though the subcores
	 * have already exited the guest.  The HMI then keeps waking the
	 * secondaries from nap in a loop, and they always go back to nap
	 * since no vcore is assigned to them.  This makes it impossible
	 * for the primary thread to get hold of the secondary threads,
	 * resulting in a soft lockup in the KVM path.
	 *
	 * Let us check if an HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

/* Stack frame offsets */
#define STACK_SLOT_TID		(112-16)
#define STACK_SLOT_PSSCR	(112-24)

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif
	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r9, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r9
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r9
	bne	21b
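	/*
	 * Illustrative sketch (not from the source): entry_exit_map packs
	 * the thread entry map in bits 0-7 and the exit map above them, so
	 * a value >= 0x100 means some thread has already started exiting.
	 * The loop above is then, approximately,
	 *
	 *	do {
	 *		m = load_reserve(&vc->entry_exit_map);
	 *		if (m >= 0x100)
	 *			goto secondary_too_late;
	 *	} while (!store_conditional(&vc->entry_exit_map,
	 *				    m | (1 << ptid)));
	 */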

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	cmpwi	r6,0
	bne	10f
	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	lwz	r6,KVM_TLB_SETS(r9)
	li	r0,0			/* RS for P9 version of tlbiel */
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
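	/*
	 * Editorial note: the tlbiel loop above steps r7 through the TLB
	 * set index (which lives above bit 12, hence the 0x1000 stride)
	 * for KVM_TLB_SETS iterations, with the IS=0b10 encoding asking,
	 * per the comment above, for entries belonging to this LPID.
	 */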

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
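	/*
	 * Editorial note: mtspr TBU40 writes only the upper 40 bits of
	 * the timebase.  If the low 24 bits wrapped between the two mftb
	 * reads, the addis of 0x100 (i.e. 1 << 24) bumps the upper 40-bit
	 * field by one to compensate for the carry that was lost.
	 */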

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)
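	/*
	 * Illustrative sketch (not from the source): the DEC setup above
	 * amounts to
	 *	dec = (vcpu->dec_expires + vc->tb_offset) - mftb();
	 * i.e. convert the host-TB expiry to guest TB, then program the
	 * decrementer with the ticks remaining until expiry.
	 */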

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	cmpwi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
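	/*
	 * Editorial note: the rldicl/rotldi pair clears MSR_HV without
	 * materialising a 64-bit mask: rotate so MSR_HV becomes the top
	 * bit, drop it via the rldicl mask, rotate back, then OR in
	 * MSR_ME so the guest always takes machine checks.
	 */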

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
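	/* Editorial note: copying the pending-external flag into LPCR[MER]
	   arms a mediated external interrupt, so the guest will take the
	   interrupt as soon as it sets MSR_EE even if we enter it with
	   external interrupts disabled. */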
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.

secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= (guest CR << 32) | interrupt vector
	 * R13		= PACA
	 * guest R12 saved in shadow VCPU SCRATCH0
	 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)
	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	std	r3, VCPU_GPR(R12)(r9)
	/* CR is in the high half of r12 */
	srdi	r4, r12, 32
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	/* trap is in the low half of r12, clear CR from the high half */
	clrldi	r12, r12, 32
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)
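	/*
	 * Editorial note: the first-level handlers set bit 1 of the trap
	 * number in r12 for interrupts delivered through HSRR0/1; the
	 * "andi. r0, r12, 2" above tests that flag, and the clrrdi
	 * clears it again once PC/MSR have been read from the right pair.
	 */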

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
#ifdef CONFIG_RELOCATABLE
	ld	r3, HSTATE_SCRATCH1(r13)
	mtctr	r3
#else
	mfctr	r3
#endif
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr

	/*
	 * Restore the active volatile registers after returning from
	 * a C function.
	 */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_EXTERNAL

	/*
	 * kvmppc_read_intr return codes:
	 *
	 * Exit to host (r3 > 0)
	 *   1 An interrupt is pending that needs to be handled by the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *
	 *   2 Passthrough that needs completion in the host
	 *     Exit guest and return to host by branching to guest_exit_cont
	 *     However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
	 *     to indicate to the host to complete handling the interrupt
	 *
	 * Before returning to the guest, we check if any CPU is heading
	 * out to the host; if so, we head out also.  If no CPUs are
	 * heading out, we check the return values <= 0 below.
	 *
	 * Return to guest (r3 <= 0)
	 *   0 No external interrupt is pending
	 *   -1 A guest wakeup IPI (which has now been cleared)
	 *      In either case, we return to guest to deliver any pending
	 *      guest interrupts.
	 *
	 *   -2 A PCI passthrough external interrupt was handled
	 *      (interrupt was delivered directly to guest)
	 *      Return to guest to deliver any pending guest interrupts.
	 */

	cmpdi	r3, 1
	ble	1f

	/* Return code = 2 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
	stw	r12, VCPU_TRAP(r9)
	b	guest_exit_cont

1:	/* Return code <= 1 */
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Return code <= 0 */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	mr	r3, r12
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)
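	/*
	 * Illustrative sketch (not from the source): the SLB save loop
	 * above is approximately
	 *
	 *	for (i = n = 0; i < vcpu->slb_nr; i++) {
	 *		e = slbmfee(i);
	 *		if (e & SLB_ESID_V) {
	 *			vcpu->slb[n].orige = e | i;	// keep index
	 *			vcpu->slb[n].origv = slbmfev(i);
	 *			n++;
	 *		}
	 *	}
	 *	vcpu->slb_max = n;
	 */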

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
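	/*
	 * Illustrative sketch (not from the source): the accounting above
	 * amounts to
	 *	delta = purr_now - purr_at_entry;
	 *	vcpu->purr = purr_now;
	 *	host_purr = saved_host_purr + delta;	// ditto for SPURR
	 * so cycles spent in the guest are charged to the host totals.
	 */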

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)
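	/* Editorial note: this mirrors the entry-side calculation:
	   expiry = sign_extend(DEC) + guest_tb, converted back to host TB
	   by subtracting the vcore's timebase offset. */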

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
FTR_SECTION_ELSE
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	std	r5, VCPU_TID(r9)
	rldicl	r6, r6, 4, 50	/* r6 &= PSSCR_GUEST_VIS */
	rotldi	r6, r6, 60
	std	r6, VCPU_PSSCR(r9)
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
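	/*
	 * Editorial note: the rldicl/rotldi pair above masks PSSCR down
	 * to its guest-visible fields (the top 4 and bottom 10 bits)
	 * without loading a 64-bit mask constant: rotate left 4, keep the
	 * low 14 bits, then rotate back right 4.
	 */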
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0
	mtspr	SPRN_WORT, r0
BEGIN_FTR_SECTION
	mtspr	SPRN_TCSCR, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't cause
	 * a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
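	/*
	 * (li -1 / clrrdi ,10 above builds 0xFFFFFFFFFFFFFC00, i.e.
	 * ones in the top 54 bit positions, which covers the
	 * 9-bits-per-counter freeze condition fields for all 6
	 * counters in MMCR2.)
	 */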
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	std	r5, VCPU_SIER(r9)
BEGIN_FTR_SECTION_NESTED(96)
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

	/* Restore host values of some registers */
BEGIN_FTR_SECTION
	ld	r5, STACK_SLOT_TID(r1)
	ld	r6, STACK_SLOT_PSSCR(r1)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	lwz	r7,KVM_HOST_LPID(r4)
BEGIN_FTR_SECTION
	ld	r6,KVM_HOST_SDR1(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to host page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	mfspr	r7, SPRN_DPDES
	mfspr	r8, SPRN_VTB
	std	r7, VCORE_DPDES(r5)
	std	r8, VCORE_VTB(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler has already
	 * resynced the TB, so there is no need to subtract the guest
	 * timebase offset from the timebase; skip that step.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	b	30f

27:
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

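	/*
	 * (TBU40 only writes the upper 40 bits of the timebase.  If
	 * the low 24 bits wrapped between the mftb and the mtspr, the
	 * timebase would have gone backwards, so the low 24 bits are
	 * compared before and after; on overflow the upper bits are
	 * bumped by one, since addis of 0x100 adds 2^24, the lowest
	 * TBU40-visible increment.)
	 */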
17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path. In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
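/*
 * (Decision flow, roughly:
 *	if (HDSISR says HPTE-not-found or protection fault) {
 *		r3 = kvmppc_hpte_hv_fault(...);
 *		r3 == 0  -> retry the instruction in the guest;
 *		r3 == -1 -> exit to the host kernel;
 *		r3 == -2 -> MMIO emulation, fetch the instruction word;
 *		else        synthesize a DSI with DSISR = r3;
 *	} else synthesize a DSI (or DSegI if no SLB entry) directly.)
 */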
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_ASDR		/* on POWER9, use ASDR to get VSID */
	b	4f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8		/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3		/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
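	/*
	 * (The test above is, in C terms:
	 *	bit = hcall_nr / 4;
	 *	if (!((kvm->arch.enabled_hcalls[bit >> 6] >> (bit & 0x3f)) & 1))
	 *		exit to host;
	 * Hcall numbers are multiples of 4, so nr/4 gives a dense bit
	 * index; the same property lets the masked number below index
	 * the .long dispatch table directly as a byte offset.)
	 */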
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it has been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
	.long	0		/* 0x2fc */
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	ld	r9, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_save_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
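	/* (In effect: DEC = min(DEC, HDEC), as a signed 32-bit compare.) */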
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
	cmpw	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	extsw	r3, r3
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer, external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

kvm_nap_sequence:		/* desired LPCR value in r5 */
BEGIN_FTR_SECTION
	/*
	 * PSSCR bits:	exit criterion = 1 (wakeup based on LPCR at sreset)
	 *		enable state loss = 1 (allow SMT mode switch)
	 *		requested level = 0 (just stop dispatching)
	 */
	lis	r3, (PSSCR_EC | PSSCR_ESL)@h
	mtspr	SPRN_PSSCR, r3
	/* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
	li	r4, LPCR_PECE_HVEE@higher
	sldi	r4, r4, 32
	or	r5, r5, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
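	/*
	 * (The store/ptesync/load and the always-true cmpd/bne spin
	 * above are the usual idiom for making sure the preceding
	 * stores have been performed before the thread enters
	 * nap/stop; the branch is never actually taken.)
	 */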
BEGIN_FTR_SECTION
	nap
FTR_SECTION_ELSE
	PPC_STOP
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	bl	kvmppc_restore_tm
END_FTR_SECTION_IFSET(CPU_FTR_TM)
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called a
	 * C routine in kvmppc_check_wake_reason.
	 * r4 = VCPU
	 * r3 tells us whether we need to return to host or not
	 * WARNING: r3 is checked further down; do not modify it
	 * until that check is done.
	 */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason saved in r3 means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest
	 * execution with the current HSRR0 instead of exiting the
	 * guest.  This approach injects the machine check into the
	 * guest for fatal errors, causing the guest to crash.
	 *
	 * The old code used to return to the host for unhandled
	 * errors, which caused the guest to hang with soft lockups
	 * inside the guest and made it difficult to recover the
	 * guest instance.
	 *
	 * If we receive a machine check with MSR(RI=0), deliver it
	 * to the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	ld	r10, VCPU_PC(r9)
	cmpdi	r3, 0		/* Did we handle MCE ? */
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *	-2 if we handled a PCI passthrough interrupt (returned by
 *		kvmppc_read_intr only)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies all volatile registers (since it may call a C function).
 * This routine calls kvmppc_read_intr, a C function, if an external
 * interrupt is pending.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 8			/* was it an external interrupt? */
	beq	7f			/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

	/* external interrupt - create a stack frame so we can call C */
7:	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -PPC_MIN_STKFRM(r1)
	bl	kvmppc_read_intr
	nop
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	cmpdi	r3, 1
	ble	1f

	/*
	 * Return code of 2 means PCI passthrough interrupt, but
	 * we need to return back to host to complete handling the
	 * interrupt. Trap reason is expected in r12 by guest
	 * exit code.
	 */
	li	r12, BOOK3S_INTERRUPT_HV_RM_HARD
1:
	ld	r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
	addi	r1, r1, PPC_MIN_STKFRM
	mtlr	r0
	blr

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
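/*
 * (High-level flow, as a sketch: turn MSR[TM] on; if the guest MSR
 * shows a transaction was active, TRECLAIM with cause
 * TM_CAUSE_KVM_RESCHED, which replaces all register state with the
 * checkpointed values; save those GPRs, the FP/VMX state and the
 * checkpointed SPRs into the vcpu struct.  TFHAR/TFIAR/TEXASR are
 * always saved at the end, since the guest can change them outside
 * of a transaction.)
 */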
kvmppc_save_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR. Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
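/*
 * (High-level flow, as a sketch: enable MSR[TM,FP,VEC,VSX]; always
 * restore TFHAR/TFIAR/TEXASR; if the guest MSR shows a transaction
 * was active, force TEXASR[FS], load the checkpointed FP/VMX/GPR/SPR
 * state and TRECHKPT, which installs the checkpoint and leaves the
 * thread in suspended state; then recover r1, r2 and r4 from the
 * PACA.)
 */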
kvmppc_restore_tm:
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
	std	r1, HSTATE_HOST_R1(r13)

	/*
	 * Make sure the failure summary is set; otherwise we'll take a
	 * program check when we trechkpt.  It's possible that this might
	 * not have been set on a kvmppc_set_one_reg() call, but we
	 * shouldn't let that crash the host.
	 */
2751 oris r7, r7, (TEXASR_FS)@h
2752 mtspr SPRN_TEXASR, r7
2753
2754 /*
2755 * We need to load up the checkpointed state for the guest.
2756 * We need to do this early as it will blow away any GPRs, VSRs and
2757 * some SPRs.
2758 */
2759
2760 mr r31, r4
2761 addi r3, r31, VCPU_FPRS_TM
2762 bl load_fp_state
2763 addi r3, r31, VCPU_VRS_TM
2764 bl load_vr_state
2765 mr r4, r31
2766 lwz r7, VCPU_VRSAVE_TM(r4)
2767 mtspr SPRN_VRSAVE, r7
2768
2769 ld r5, VCPU_LR_TM(r4)
2770 lwz r6, VCPU_CR_TM(r4)
2771 ld r7, VCPU_CTR_TM(r4)
2772 ld r8, VCPU_AMR_TM(r4)
2773 ld r9, VCPU_TAR_TM(r4)
Paul Mackerras0d808df2016-11-07 15:09:58 +11002774 ld r10, VCPU_XER_TM(r4)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002775 mtlr r5
2776 mtcr r6
2777 mtctr r7
2778 mtspr SPRN_AMR, r8
2779 mtspr SPRN_TAR, r9
Paul Mackerras0d808df2016-11-07 15:09:58 +11002780 mtxer r10
Paul Mackerrasf024ee02016-06-22 14:21:59 +10002781
	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR
	 * for too long.
	 */
	ld r29, VCPU_DSCR_TM(r4)
	ld r30, VCPU_PPR_TM(r4)

	std r2, PACATMSCRATCH(r13) /* Save TOC */
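	/*
	 * The PACA is the only safe place for the TOC here: the stack and
	 * even r13 are about to be loaded with checkpointed guest values,
	 * and GET_PACA recovers r13 again after the trechkpt.
	 */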

	/*
	 * Clear MSR[RI]: r1 and r13 are about to be clobbered with
	 * checkpointed guest values, so an interrupt taken here would be
	 * unrecoverable.
	 */
	li r5, 0
	mtmsrd r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept 29
	ld reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr
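
	/*
	 * r29-r31 can't be loaded yet: r29 and r30 hold the guest DSCR and
	 * PPR values until the last moment, and r31 is still needed to
	 * address the vcpu struct.
	 */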
	mtspr SPRN_DSCR, r29
	mtspr SPRN_PPR, r30

	/* Load final GPRs */
	ld 29, VCPU_GPRS_TM(29)(r31)
	ld 30, VCPU_GPRS_TM(30)(r31)
	ld 31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now set up.  All GPRs are now volatile. */
	TRECHKPT
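	/*
	 * trechkpt. installs the values now in the registers as the
	 * checkpointed state and leaves the thread in suspended
	 * transaction state.
	 */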

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld r29, HSTATE_DSCR(r13)
	mtspr SPRN_DSCR, r29
	ld r4, HSTATE_KVM_VCPU(r13)
	ld r1, HSTATE_HOST_R1(r13)
	ld r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li r5, MSR_RI
	mtmsrd r5, 1

	ld r0, PPC_LR_STKOFF(r1)
	mtlr r0
	blr
#endif

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b .

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
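 *
 * If the guest was merely suspended (TS = 01), the rldicl leaves r0 = 1
 * and the rldimi below re-inserts it unchanged; with no transaction
 * active, r0 = 0 keeps the TS field clear.  Only the fully transactional
 * case (TS = 10) is converted to suspended.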
 */
kvmppc_msr_interrupt:
	rldicl r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi r0, 2 /* Check if we are in transactional state... */
	ld r11, VCPU_INTR_MSR(r9)
	bne 1f
	/* ... if transactional, change to suspended */
	li r0, 1
1:	rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li r3, 0
	mtspr SPRN_MMCR2, r3
	lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr SPRN_MMCR0, r3
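	/*
	 * Load PMC6 with 0x7fffffff, one below the 2^31 overflow
	 * threshold, so that the next event counted makes the counter
	 * negative and raises the pending PMU interrupt.
	 */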
	lis r3, 0x7fff
	ori r3, r3, 0xffff
	mtspr SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
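	/*
	 * If the vcore is running in the guest, the timebase carries the
	 * guest's offset, so subtract VCORE_TB_OFFSET to get host time.
	 * When not in guest, the beq skips the load and r6 still holds
	 * the zero from the lbz, making the subf a no-op.
	 */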
	ld r5, HSTATE_KVM_VCORE(r13)
	lbz r6, VCORE_IN_GUEST(r5)
	cmpwi r6, 0
	beq 5f /* if in guest, need to */
	ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
5:	mftb r5
	subf r5, r6, r5
	std r3, VCPU_CUR_ACTIVITY(r4)
	std r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
kvmhv_accumulate_time:
	ld r5, HSTATE_KVM_VCORE(r13)
	lbz r8, VCORE_IN_GUEST(r5)
	cmpwi r8, 0
	beq 4f /* if in guest, need to */
	ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
4:	ld r5, VCPU_CUR_ACTIVITY(r4)
	ld r6, VCPU_ACTIVITY_START(r4)
	std r3, VCPU_CUR_ACTIVITY(r4)
	mftb r7
	subf r7, r8, r7
	std r7, VCPU_ACTIVITY_START(r4)
	cmpdi r5, 0
	beqlr
	subf r3, r6, r7
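	/*
	 * Writer side of a seqcount: bump TAS_SEQCOUNT to an odd value
	 * before touching the stats and back to even afterwards, with
	 * lwsync ordering the stores, so that a reader can detect and
	 * retry a torn update.  The cmpdi result (seqcount == 0, i.e.
	 * first sample) is consumed by the beq below to initialise the
	 * minimum.
	 */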
	ld r8, TAS_SEQCOUNT(r5)
	cmpdi r8, 0
	addi r8, r8, 1
	std r8, TAS_SEQCOUNT(r5)
	lwsync
	ld r7, TAS_TOTAL(r5)
	add r7, r7, r3
	std r7, TAS_TOTAL(r5)
	ld r6, TAS_MIN(r5)
	ld r7, TAS_MAX(r5)
	beq 3f
	cmpd r3, r6
	bge 1f
3:	std r3, TAS_MIN(r5)
1:	cmpd r3, r7
	ble 2f
	std r3, TAS_MAX(r5)
2:	lwsync
	addi r8, r8, 1
	std r8, TAS_SEQCOUNT(r5)
	blr
#endif