/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/code-patching-asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>
#include <asm/xive-regs.h>
#include <asm/thread_info.h>
#include <asm/asm-compat.h>
#include <asm/feature-fixups.h>

/* Sign-extend HDEC if not on POWER9 */
#define EXTEND_HDEC(reg)			\
BEGIN_FTR_SECTION;				\
	extsw	reg, reg;			\
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
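/*
 * On ISA v3.00 (POWER9) HDEC is a full 64-bit register, so no sign
 * extension is needed; on earlier CPUs HDEC is a 32-bit quantity, so
 * the value read with mfspr must be sign-extended before any 64-bit
 * comparison against zero, hence the extsw above.
 */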

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2
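/*
 * NAPPING_CEDE: the vcpu executed H_CEDE and this thread napped until
 * an interrupt or exit.  NAPPING_NOVCPU: the thread had no vcpu to run
 * and napped while other threads of the (sub)core were in the guest.
 */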

/* Stack frame offsets for kvmppc_hv_entry */
#define SFS			160
#define STACK_SLOT_TRAP		(SFS-4)
#define STACK_SLOT_TID		(SFS-16)
#define STACK_SLOT_PSSCR	(SFS-24)
#define STACK_SLOT_PID		(SFS-32)
#define STACK_SLOT_IAMR		(SFS-40)
#define STACK_SLOT_CIABR	(SFS-48)
#define STACK_SLOT_DAWR		(SFS-56)
#define STACK_SLOT_DAWRX	(SFS-64)
#define STACK_SLOT_HFSCR	(SFS-72)
#define STACK_SLOT_AMR		(SFS-80)
#define STACK_SLOT_UAMOR	(SFS-88)

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
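/*
 * The sequence below drops to real mode: SRR0 is loaded with the
 * target and SRR1 with MSR minus RI/IR/DR, so the rfid transfers to
 * kvmppc_call_hv_entry with relocation (and thus the MMU) turned off.
 */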
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	std	r10, HSTATE_HOST_MSR(r13)
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI_TO_KERNEL

kvmppc_call_hv_entry:
BEGIN_FTR_SECTION
	/* On P9, do LPCR setting, if necessary */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	46f
	lwz	r4, KVM_SPLIT_DO_SET(r3)
	cmpwi	r4, 0
	beq	46f
	bl	kvmhv_p9_set_lpcr
	nop
46:
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)

	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	lbz	r4, PACA_PMCINUSE(r13)	/* is the host using the PMU? */
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
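	/*
	 * HSTATE_DECEXP holds the host decrementer expiry time as an
	 * absolute timebase value, so the count to program into DEC
	 * is (expiry - current timebase).
	 */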
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external interrupts we need to call the Linux
	 * handler to process the interrupt. We do that by jumping
	 * to absolute address 0x500 for external interrupts.
	 * The [h]rfid at the end of the handler will return to
	 * the book3s_hv_interrupts.S code. For other interrupts
	 * we do the rfid to get back to the book3s_hv_interrupts.S
	 * code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	/* Return the trap number on this thread as the return value */
	mr	r3, r12

	/*
	 * If we came back from the guest via a relocation-on interrupt,
	 * we will be in virtual mode at this point, which makes it a
	 * little easier to get back to the caller.
	 */
	mfmsr	r0
	andi.	r0, r0, MSR_IR		/* in real mode? */
	bne	.Lvirt_return

	/* RFI into the highmem handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	RFI_TO_KERNEL

	/* Virtual-mode return */
.Lvirt_return:
	mtlr	r8
	blr

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	/* HDEC may be larger than DEC for arch >= v3.00, but since the */
	/* HDEC value came from DEC in the first place, it will fit */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
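	/*
	 * The bit is set with a lwarx/stwcx. loop: if another thread
	 * updates the word concurrently, our store loses its
	 * reservation and stwcx. fails, so we re-read and retry,
	 * making the bit-set atomic.
	 */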
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

/*
 * kvm_novcpu_wakeup
 *	Entered from kvm_start_guest if kvm_hstate.napping is set
 *	to NAPPING_NOVCPU
 *		r2 = kernel TOC
 *		r13 = paca
 */
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/*
	 * Restore volatile registers since we could have called
	 * a C routine in kvmppc_check_wake_reason.
	 *	r5 = VCORE
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	EXTEND_HDEC(r0)
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpdi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, STACK_SLOT_TRAP(r1)
	bl	kvmhv_commence_exit
	nop
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 * r3 contains the SRR1 wakeup value, SRR1 is trashed.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	/* Set runlatch bit the minute you wake up from nap */
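	/*
	 * The CTRL register is read through SPRN_CTRLF and written
	 * through SPRN_CTRLT; the low RUN bit set here indicates to
	 * the hardware that this thread is doing useful work again.
	 */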
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	/*
	 * Could avoid this and pass it through in r3. For now,
	 * code expects it to be in SRR1.
	 */
	mtspr	SPRN_SRR1,r3

	ld	r2,PACATOC(r13)

	li	r0,0
	stb	r0,PACA_FTRACE_ENABLED(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	/*
	 * kvmppc_check_wake_reason could invoke a C routine, but we
	 * have no volatile registers to restore when we return.
	 */

	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	LOAD_REG_ADDR(r6, decrementer_max)
	ld	r6, 0(r6)
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
BEGIN_FTR_SECTION
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
FTR_SECTION_ELSE
	/* On P9 we use the split_info for coordinating LPCR changes */
	lwz	r4, KVM_SPLIT_DO_SET(r6)
	cmpwi	r4, 0
	beq	1f
	mr	r3, r6
	bl	kvmhv_p9_set_lpcr
	nop
1:
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting the guest fall through this path.
	 * Before proceeding, check for an HMI interrupt and, if one is
	 * pending, invoke the OPAL HMI handler.  By now we are sure
	 * that the primary thread on this core/subcore has already done
	 * the partition switch and TB resync, so it is safe to call the
	 * OPAL HMI handler.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to pnv_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap. pnv_wakeup_loss
 * requires SRR1 in r12.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	mfspr	r12,SPRN_SRR1
	b	pnv_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lwz	r0, KVM_SPLIT_DO_SET(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_set
	lwz	r0, KVM_SPLIT_DO_RESTORE(r3)
	cmpwi	r0, 0
	bne	kvmhv_do_restore
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

kvmhv_do_set:
	/* Set LPCR, LPIDR etc. on P9 */
	HMT_MEDIUM
	bl	kvmhv_p9_set_lpcr
	nop
	b	kvm_no_guest

kvmhv_do_restore:
	HMT_MEDIUM
	bl	kvmhv_p9_restore_lpcr
	nop
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI can go ignored even though the
	 * subcores have already exited the guest.  The HMI then keeps
	 * waking the secondaries from nap in a loop, and they always
	 * go back to nap since no vcore is assigned to them.  This
	 * makes it impossible for the primary thread to get hold of
	 * the secondary threads, resulting in a soft lockup in the
	 * KVM path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lbz	r4, HSTATE_TID(r13)
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
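	/*
	 * The LPCR PECE bits select which events can wake a napping
	 * thread: the sequence below enables hypervisor doorbells
	 * (PECEDH) and external interrupts (PECE0) while clearing
	 * PECEDP and PECE1, so privileged doorbells and the
	 * decrementer will not wake us.
	 */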
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r5, SPRN_LPCR
	rlwimi	r5, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	b	kvm_nap_sequence

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 * Does not preserve non-volatile GPRs or CR fields
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -SFS(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif

	/* Use cr7 as an indication of radix mode */
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r9, VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r0, KVM_RADIX(r9)
	cmpwi	cr7, r0, 0

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
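	/*
	 * VCORE_ENTRY_EXIT packs two maps into one word: per-thread
	 * entry bits in the low byte and exit bits above them, so a
	 * value >= 0x100 means some thread has begun exiting and it
	 * is too late to join this guest entry.
	 */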
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r8, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r8
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r8
	bne	21b

	/* Primary thread switches to guest partition. */
	cmpwi	r6,0
	bne	10f

	/* Radix has already switched LPID and flushed core TLB */
	bne	cr7, 22f

	lwz	r7,KVM_LPID(r9)
BEGIN_FTR_SECTION
	ld	r6,KVM_SDR1(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB. Hash has to be done in RM */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
BEGIN_FTR_SECTION
	/*
	 * On POWER9, individual threads can come in here, but the
	 * TLB is shared between the 4 threads in a core, hence
	 * invalidating on one thread invalidates for all.
	 * Thus we make all 4 threads use the same bit here.
	 */
	clrrdi	r6,r6,2
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r8,1
	sld	r8,r8,r7
	ld	r7,0(r6)
	and.	r7,r7,r8
	beq	22f
	/* Flush the TLB of any entries for this LPID */
	lwz	r0,KVM_TLB_SETS(r9)
	mtctr	r0
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
	li	r0,0			/* RS for P9 version of tlbiel */
28:	tlbiel	r7			/* On P9, rs=0, RIC=0, PRS=0, R=0 */
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
23:	ldarx	r7,0,r6			/* clear the bit after TLB flushed */
	andc	r7,r7,r8
	stdcx.	r7,0,r6
	bne	23b

	/* Add timebase offset onto timebase */
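	/*
	 * The timebase is written 40 bits at a time through TBU40,
	 * which leaves the low 24 bits running.  If those low bits
	 * wrap between the mftb and the mtspr (the new low-24 value
	 * reads below the old one), bump the upper part by one unit
	 * of bit 24, hence the addis of 0x100.
	 */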
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	std	r8, VCORE_TB_OFFSET_APPL(r5)
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES and VTB are shared between threads */
	ld	r8, VCORE_DPDES(r5)
	ld	r7, VCORE_VTB(r5)
	mtspr	SPRN_DPDES, r8
	mtspr	SPRN_VTB, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

	/* Save host values of some registers */
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_TIDR
	mfspr	r6, SPRN_PSSCR
	mfspr	r7, SPRN_PID
	std	r5, STACK_SLOT_TID(r1)
	std	r6, STACK_SLOT_PSSCR(r1)
	std	r7, STACK_SLOT_PID(r1)
	mfspr	r5, SPRN_HFSCR
	std	r5, STACK_SLOT_HFSCR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_CIABR
	mfspr	r6, SPRN_DAWR
	mfspr	r7, SPRN_DAWRX
	mfspr	r8, SPRN_IAMR
	std	r5, STACK_SLOT_CIABR(r1)
	std	r6, STACK_SLOT_DAWR(r1)
	std	r7, STACK_SLOT_DAWRX(r1)
	std	r8, STACK_SLOT_IAMR(r1)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	mfspr	r5, SPRN_AMR
	std	r5, STACK_SLOT_AMR(r1)
	mfspr	r6, SPRN_UAMOR
	std	r6, STACK_SLOT_UAMOR(r1)

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
/*
 * Branch around the call if both CPU_FTR_TM and
 * CPU_FTR_P9_TM_HV_ASSIST are off.
 */
BEGIN_FTR_SECTION
	b	91f
END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
	/*
	 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
	 */
	mr	r3, r4
	ld	r4, VCPU_MSR(r3)
	bl	kvmppc_restore_tm_hv
	ld	r4, HSTATE_KVM_VCPU(r13)
91:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
BEGIN_FTR_SECTION_NESTED(96)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	/*
	 * Handle broken DAWR case by not writing it. This means we
	 * can still store the DAWR register for migration.
	 */
BEGIN_FTR_SECTION
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
END_FTR_SECTION_IFSET(CPU_FTR_DAWR)
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
BEGIN_FTR_SECTION
	/* POWER8-only registers */
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	nop
FTR_SECTION_ELSE
	/* POWER9-only registers */
	ld	r5, VCPU_TID(r4)
	ld	r6, VCPU_PSSCR(r4)
	lbz	r8, HSTATE_FAKE_SUSPEND(r13)
	oris	r6, r6, PSSCR_EC@h	/* This makes stop trap to HV */
	rldimi	r6, r8, PSSCR_FAKE_SUSPEND_LG, 63 - PSSCR_FAKE_SUSPEND_LG
	ld	r7, VCPU_HFSCR(r4)
	mtspr	SPRN_TIDR, r5
	mtspr	SPRN_PSSCR, r6
	mtspr	SPRN_HFSCR, r7
ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
8:

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET_APPL(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3

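	/*
	 * 512 timebase ticks is about 1 microsecond at the 512MHz
	 * timebase; with less HDEC left than that, entering the guest
	 * would bounce straight back out, so exit via hdec_soon
	 * instead.
	 */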
	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	EXTEND_HDEC(r3)
	cmpdi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	/* For hash guest, clear out and reload the SLB */
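	/*
	 * slbia leaves SLB entry 0 alone, so a zeroed entry 0 is
	 * written explicitly with slbmte first, then slbia invalidates
	 * the rest; the guest's entries are reloaded from the vcpu
	 * shadow SLB array below.  Radix guests skip all this
	 * (slb_max is 0) since the hardware walks the guest page
	 * tables directly.
	 */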
	ld	r6, VCPU_KVM(r4)
	lbz	r0, KVM_RADIX(r6)
	cmpwi	r0, 0
	bne	9f
	li	r6, 0
	slbmte	r6, r6
	slbia
	ptesync

	/* Load up guest SLB entries (N.B. slb_max will be 0 for radix) */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

#ifdef CONFIG_KVM_XICS
	/* We are entering the guest on that thread, push VCPU to XIVE */
	ld	r10, HSTATE_XIVE_TIMA_PHYS(r13)
	cmpldi	cr0, r10, 0
	beq	no_xive
	ld	r11, VCPU_XIVE_SAVED_STATE(r4)
	li	r9, TM_QW1_OS
	eieio
	stdcix	r11,r9,r10
	lwz	r11, VCPU_XIVE_CAM_WORD(r4)
	li	r9, TM_QW1_OS + TM_WORD2
	stwcix	r11,r9,r10
	li	r9, 1
	stb	r9, VCPU_XIVE_PUSHED(r4)
	eieio

	/*
	 * We clear the irq_pending flag. There is a small chance of a
	 * race vs. the escalation interrupt happening on another
	 * processor setting it again, but the only consequence is to
	 * cause a spurious wakeup on the next H_CEDE, which is not an
	 * issue.
	 */
	li	r0,0
	stb	r0, VCPU_IRQ_PENDING(r4)

	/*
	 * In single escalation mode, if the escalation interrupt is
	 * on, we mask it.
	 */
	lbz	r0, VCPU_XIVE_ESC_ON(r4)
	cmpwi	r0,0
	beq	1f
	ld	r10, VCPU_XIVE_ESC_RADDR(r4)
	li	r9, XIVE_ESB_SET_PQ_01
	ldcix	r0, r10, r9
	sync

	/*
	 * We have a possible subtle race here: The escalation interrupt might
	 * have fired and be on its way to the host queue while we mask it,
	 * and if we unmask it early enough (re-cede right away), there is
	 * a theoretical possibility that it fires again, thus landing in the
	 * target queue more than once, which is a big no-no.
	 *
	 * Fortunately, solving this is rather easy. If the above load setting
	 * PQ to 01 returns a previous value where P is set, then we know the
	 * escalation interrupt is somewhere on its way to the host. In that
	 * case we simply don't clear the xive_esc_on flag below. It will
	 * eventually be cleared by the handler for the escalation interrupt.
	 *
	 * Then, when doing a cede, we check that flag again before re-enabling
	 * the escalation interrupt, and if set, we abort the cede.
	 */
	andi.	r0, r0, XIVE_ESB_VAL_P
	bne-	1f

	/* Now P is 0, we can clear the flag */
	li	r0, 0
	stb	r0, VCPU_XIVE_ESC_ON(r4)
1:
no_xive:
#endif /* CONFIG_KVM_XICS */

deliver_guest_interrupt:
	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
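	/*
	 * Delivery of a pending external uses LPCR_MER (Mediated
	 * External Request): with MER set, the guest takes an external
	 * interrupt as soon as it enables MSR_EE, so we can post the
	 * interrupt even while the guest currently has EE off.
	 */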
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
BEGIN_FTR_SECTION
	/* On POWER9 check whether the guest has large decrementer enabled */
	andis.	r8, r8, LPCR_LD@h
	bne	15f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
	extsw	r0, r0
15:	cmpdi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:
BEGIN_FTR_SECTION
	b	fast_guest_return
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
	/* On POWER9, check for pending doorbell requests */
	lbz	r0, VCPU_DBELL_REQ(r4)
	cmpwi	r0, 0
	beq	fast_guest_return
	ld	r5, HSTATE_KVM_VCORE(r13)
	/* Set DPDES register so the CPU will take a doorbell interrupt */
	li	r0, 1
	mtspr	SPRN_DPDES, r0
	std	r0, VCORE_DPDES(r5)
	/* Make sure other cpus see vcore->dpdes set before dbell req clear */
	lwsync
	/* Clear the pending doorbell request */
	li	r0, 0
	stb	r0, VCPU_DBELL_REQ(r4)

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	ld	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

/* Move canary into DSISR to check for later */
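/*
 * On POWER9 an HDSI can occasionally be presented before HDSISR has
 * been updated; the 0x7fff canary written here lets the HDSI handler
 * recognise a stale HDSISR and retry the faulting instruction instead
 * of mishandling it.
 */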
1235BEGIN_FTR_SECTION
1236 li r0, 0x7fff
1237 mtspr SPRN_HDSISR, r0
1238END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1239
Paul Mackerras4b8473c2013-09-20 14:52:39 +10001240 ld r0, VCPU_GPR(R0)(r4)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001241 ld r4, VCPU_GPR(R4)(r4)
Nicholas Piggin222f20f2018-01-10 03:07:15 +11001242 HRFI_TO_GUEST
Paul Mackerrasde56a942011-06-29 00:21:34 +00001243 b .
1244
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001245secondary_too_late:
Paul Mackerras6af27c82015-03-28 14:21:10 +11001246 li r12, 0
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11001247 stw r12, STACK_SLOT_TRAP(r1)
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001248 cmpdi r4, 0
1249 beq 11f
Paul Mackerras6af27c82015-03-28 14:21:10 +11001250 stw r12, VCPU_TRAP(r4)
1251#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001252 addi r3, r4, VCPU_TB_RMEXIT
1253 bl kvmhv_accumulate_time
Paul Mackerras6af27c82015-03-28 14:21:10 +11001254#endif
Paul Mackerrasb6c295d2015-03-28 14:21:02 +1100125511: b kvmhv_switch_to_host
1256
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001257no_switch_exit:
1258 HMT_MEDIUM
1259 li r12, 0
1260 b 12f
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001261hdec_soon:
Paul Mackerras6af27c82015-03-28 14:21:10 +11001262 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000126312: stw r12, VCPU_TRAP(r4)
Paul Mackerras6af27c82015-03-28 14:21:10 +11001264 mr r9, r4
1265#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001266 addi r3, r4, VCPU_TB_RMEXIT
1267 bl kvmhv_accumulate_time
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001268#endif
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001269 b guest_bypass
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001270
Paul Mackerrasde56a942011-06-29 00:21:34 +00001271/******************************************************************************
1272 * *
1273 * Exit code *
1274 * *
1275 *****************************************************************************/
1276
1277/*
1278 * We come here from the first-level interrupt handlers.
1279 */
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +05301280 .globl kvmppc_interrupt_hv
1281kvmppc_interrupt_hv:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001282 /*
1283 * Register contents:
Nicholas Piggind3918e72016-12-22 04:29:25 +10001284 * R12 = (guest CR << 32) | interrupt vector
Paul Mackerrasde56a942011-06-29 00:21:34 +00001285 * R13 = PACA
Nicholas Piggind3918e72016-12-22 04:29:25 +10001286 * guest R12 saved in shadow VCPU SCRATCH0
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001287 * guest CTR saved in shadow VCPU SCRATCH1 if RELOCATABLE
Paul Mackerrasde56a942011-06-29 00:21:34 +00001288 * guest R13 saved in SPRN_SCRATCH0
1289 */
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001290 std r9, HSTATE_SCRATCH2(r13)
Paul Mackerras44a3add2013-10-04 21:45:04 +10001291 lbz r9, HSTATE_IN_GUEST(r13)
1292 cmpwi r9, KVM_GUEST_MODE_HOST_HV
1293 beq kvmppc_bad_host_intr
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +05301294#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1295 cmpwi r9, KVM_GUEST_MODE_GUEST
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001296 ld r9, HSTATE_SCRATCH2(r13)
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +05301297 beq kvmppc_interrupt_pr
1298#endif
Paul Mackerras44a3add2013-10-04 21:45:04 +10001299 /* We're now back in the host but in guest MMU context */
1300 li r9, KVM_GUEST_MODE_HOST_HV
1301 stb r9, HSTATE_IN_GUEST(r13)
1302
Paul Mackerrasde56a942011-06-29 00:21:34 +00001303 ld r9, HSTATE_KVM_VCPU(r13)
1304
1305 /* Save registers */
1306
Michael Neulingc75df6f2012-06-25 13:33:10 +00001307 std r0, VCPU_GPR(R0)(r9)
1308 std r1, VCPU_GPR(R1)(r9)
1309 std r2, VCPU_GPR(R2)(r9)
1310 std r3, VCPU_GPR(R3)(r9)
1311 std r4, VCPU_GPR(R4)(r9)
1312 std r5, VCPU_GPR(R5)(r9)
1313 std r6, VCPU_GPR(R6)(r9)
1314 std r7, VCPU_GPR(R7)(r9)
1315 std r8, VCPU_GPR(R8)(r9)
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001316 ld r0, HSTATE_SCRATCH2(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001317 std r0, VCPU_GPR(R9)(r9)
1318 std r10, VCPU_GPR(R10)(r9)
1319 std r11, VCPU_GPR(R11)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001320 ld r3, HSTATE_SCRATCH0(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001321 std r3, VCPU_GPR(R12)(r9)
Nicholas Piggind3918e72016-12-22 04:29:25 +10001322 /* CR is in the high half of r12 */
1323 srdi r4, r12, 32
Paul Mackerras3ac71802018-10-08 16:30:58 +11001324 std r4, VCPU_CR(r9)
Paul Mackerras0acb9112013-02-04 18:10:51 +00001325BEGIN_FTR_SECTION
1326 ld r3, HSTATE_CFAR(r13)
1327 std r3, VCPU_CFAR(r9)
1328END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
Paul Mackerras4b8473c2013-09-20 14:52:39 +10001329BEGIN_FTR_SECTION
1330 ld r4, HSTATE_PPR(r13)
1331 std r4, VCPU_PPR(r9)
1332END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001333
1334 /* Restore R1/R2 so we can handle faults */
1335 ld r1, HSTATE_HOST_R1(r13)
1336 ld r2, PACATOC(r13)
1337
1338 mfspr r10, SPRN_SRR0
1339 mfspr r11, SPRN_SRR1
1340 std r10, VCPU_SRR0(r9)
1341 std r11, VCPU_SRR1(r9)
Nicholas Piggind3918e72016-12-22 04:29:25 +10001342 /* trap is in the low half of r12, clear CR from the high half */
1343 clrldi r12, r12, 32
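	/*
	 * The first-level handlers OR 0x2 into the vector number for
	 * interrupts delivered via HSRR0/1, so bit 1 of the trap tells
	 * us which pair of save/restore registers to read; the clrrdi
	 * below strips it back out of the trap number.
	 */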
Paul Mackerrasde56a942011-06-29 00:21:34 +00001344 andi. r0, r12, 2 /* need to read HSRR0/1? */
1345 beq 1f
1346 mfspr r10, SPRN_HSRR0
1347 mfspr r11, SPRN_HSRR1
1348 clrrdi r12, r12, 2
13491: std r10, VCPU_PC(r9)
1350 std r11, VCPU_MSR(r9)
1351
1352 GET_SCRATCH0(r3)
1353 mflr r4
Michael Neulingc75df6f2012-06-25 13:33:10 +00001354 std r3, VCPU_GPR(R13)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001355 std r4, VCPU_LR(r9)
1356
Paul Mackerrasde56a942011-06-29 00:21:34 +00001357 stw r12,VCPU_TRAP(r9)
1358
Paul Mackerras8b24e692017-06-26 15:45:51 +10001359 /*
1360 * Now that we have saved away SRR0/1 and HSRR0/1,
1361 * interrupts are recoverable in principle, so set MSR_RI.
1362 * This becomes important for relocation-on interrupts from
1363 * the guest, which we can get in radix mode on POWER9.
1364 */
1365 li r0, MSR_RI
1366 mtmsrd r0, 1
1367
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001368#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1369 addi r3, r9, VCPU_TB_RMINTR
1370 mr r4, r9
1371 bl kvmhv_accumulate_time
1372 ld r5, VCPU_GPR(R5)(r9)
1373 ld r6, VCPU_GPR(R6)(r9)
1374 ld r7, VCPU_GPR(R7)(r9)
1375 ld r8, VCPU_GPR(R8)(r9)
1376#endif
1377
Paul Mackerras4a157d62014-12-03 13:30:39 +11001378 /* Save HEIR (HV emulation assist reg) in emul_inst
Paul Mackerras697d3892011-12-12 12:36:37 +00001379 if this is an HEI (HV emulation interrupt, e40) */
1380 li r3,KVM_INST_FETCH_FAILED
Paul Mackerras2bf27602015-03-20 20:39:40 +11001381 stw r3,VCPU_LAST_INST(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001382 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1383 bne 11f
1384 mfspr r3,SPRN_HEIR
Paul Mackerras4a157d62014-12-03 13:30:39 +1100138511: stw r3,VCPU_HEIR(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001386
1387 /* these are volatile across C function calls */
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001388#ifdef CONFIG_RELOCATABLE
1389 ld r3, HSTATE_SCRATCH1(r13)
1390 mtctr r3
1391#else
Paul Mackerras697d3892011-12-12 12:36:37 +00001392 mfctr r3
Nicholas Piggina97a65d2017-01-27 14:00:34 +10001393#endif
Paul Mackerras697d3892011-12-12 12:36:37 +00001394 mfxer r4
1395 std r3, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10001396 std r4, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001397
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11001398#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1399 /* For softpatch interrupt, go off and do TM instruction emulation */
1400 cmpwi r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
1401 beq kvmppc_tm_emul
1402#endif
1403
Paul Mackerras697d3892011-12-12 12:36:37 +00001404 /* If this is a page table miss then see if it's theirs or ours */
1405 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1406 beq kvmppc_hdsi
Paul Mackerras342d3db2011-12-12 12:38:05 +00001407 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1408 beq kvmppc_hisi
Paul Mackerras697d3892011-12-12 12:36:37 +00001409
Paul Mackerrasde56a942011-06-29 00:21:34 +00001410 /* See if this is a leftover HDEC interrupt */
1411 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1412 bne 2f
1413 mfspr r3,SPRN_HDEC
Paul Mackerrasa4faf2e2017-08-25 19:52:12 +10001414 EXTEND_HDEC(r3)
1415 cmpdi r3,0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001416 mr r4,r9
1417 bge fast_guest_return
Paul Mackerrasde56a942011-06-29 00:21:34 +000014182:
Paul Mackerras697d3892011-12-12 12:36:37 +00001419 /* See if this is an hcall we can handle in real mode */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001420 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1421 beq hcall_try_real_mode
Paul Mackerrasde56a942011-06-29 00:21:34 +00001422
Paul Mackerras66feed62015-03-28 14:21:12 +11001423 /* Hypervisor doorbell - exit only if host IPI flag set */
1424 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1425 bne 3f
Nicholas Pigginbd0fdb12017-03-13 03:03:49 +10001426BEGIN_FTR_SECTION
1427 PPC_MSGSYNC
Nicholas Piggin2cde3712017-10-10 20:18:28 +10001428 lwsync
Nicholas Pigginbd0fdb12017-03-13 03:03:49 +10001429END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras66feed62015-03-28 14:21:12 +11001430 lbz r0, HSTATE_HOST_IPI(r13)
Gautham R. Shenoy06554d92015-08-07 17:41:20 +05301431 cmpwi r0, 0
Paul Mackerras66feed62015-03-28 14:21:12 +11001432 beq 4f
1433 b guest_exit_cont
14343:
Paul Mackerras769377f2017-02-15 14:30:17 +11001435 /* If it's a hypervisor facility unavailable interrupt, save HFSCR */
1436 cmpwi r12, BOOK3S_INTERRUPT_H_FAC_UNAVAIL
1437 bne 14f
1438 mfspr r3, SPRN_HFSCR
1439 std r3, VCPU_HFSCR(r9)
1440 b guest_exit_cont
144114:
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001442 /* External interrupt ? */
1443 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001444 bne+ guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001445
1446 /* External interrupt, first check for host_ipi. If this is
1447 * set, we know the host wants us out so let's do it now
1448 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001449 bl kvmppc_read_intr
Suresh Warrier37f55d32016-08-19 15:35:46 +10001450
1451 /*
1452 * Restore the active volatile registers after returning from
1453 * a C function.
1454 */
1455 ld r9, HSTATE_KVM_VCPU(r13)
1456 li r12, BOOK3S_INTERRUPT_EXTERNAL
1457
1458 /*
1459 * kvmppc_read_intr return codes:
1460 *
1461 * Exit to host (r3 > 0)
1462 * 1 An interrupt is pending that needs to be handled by the host
1463 * Exit guest and return to host by branching to guest_exit_cont
1464 *
Suresh Warrierf7af5202016-08-19 15:35:52 +10001465 * 2 Passthrough that needs completion in the host
1466 * Exit guest and return to host by branching to guest_exit_cont
1467 * However, we also set r12 to BOOK3S_INTERRUPT_HV_RM_HARD
1468 * to indicate to the host to complete handling the interrupt
1469 *
Suresh Warrier37f55d32016-08-19 15:35:46 +10001470 * Before returning to guest, we check if any CPU is heading out
1471 * to the host and if so, we head out also. If no CPUs are heading
1472 * out, fall through to the return-to-guest checks (r3 <= 0) below.
1473 *
1474 * Return to guest (r3 <= 0)
1475 * 0 No external interrupt is pending
1476 * -1 A guest wakeup IPI (which has now been cleared)
1477 * In either case, we return to guest to deliver any pending
1478 * guest interrupts.
Suresh Warriere3c13e52016-08-19 15:35:51 +10001479 *
1480 * -2 A PCI passthrough external interrupt was handled
1481 * (interrupt was delivered directly to guest)
1482 * Return to guest to deliver any pending guest interrupts.
Suresh Warrier37f55d32016-08-19 15:35:46 +10001483 */
1484
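	/*
	 * In short: r3 == 2 marks the trap as HV_RM_HARD and exits,
	 * r3 == 1 exits to the host, and r3 <= 0 returns to the guest
	 * (after the core-wide entry/exit check at 4: below).
	 */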
Suresh Warrierf7af5202016-08-19 15:35:52 +10001485 cmpdi r3, 1
1486 ble 1f
1487
1488 /* Return code = 2 */
1489 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
1490 stw r12, VCPU_TRAP(r9)
1491 b guest_exit_cont
1492
14931: /* Return code <= 1 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001494 cmpdi r3, 0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001495 bgt guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001496
Suresh Warrier37f55d32016-08-19 15:35:46 +10001497 /* Return code <= 0 */
Paul Mackerras66feed62015-03-28 14:21:12 +110014984: ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras4619ac82013-04-17 20:31:41 +00001499 lwz r0, VCORE_ENTRY_EXIT(r5)
1500 cmpwi r0, 0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11001501 mr r4, r9
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001502 blt deliver_guest_interrupt
Paul Mackerrasde56a942011-06-29 00:21:34 +00001503
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001504guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
Paul Mackerras43ff3f62018-01-11 14:31:43 +11001505 /* Save more register state */
1506 mfdar r6
1507 mfdsisr r7
1508 std r6, VCPU_DAR(r9)
1509 stw r7, VCPU_DSISR(r9)
1510 /* don't overwrite fault_dar/fault_dsisr if HDSI */
1511 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1512 beq mc_cont
1513 std r6, VCPU_FAULT_DAR(r9)
1514 stw r7, VCPU_FAULT_DSISR(r9)
1515
1516 /* See if it is a machine check */
1517 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1518 beq machine_check_realmode
1519mc_cont:
1520#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1521 addi r3, r9, VCPU_TB_RMEXIT
1522 mr r4, r9
1523 bl kvmhv_accumulate_time
1524#endif
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001525#ifdef CONFIG_KVM_XICS
1526 /* We are exiting, pull the VP from the XIVE */
Benjamin Herrenschmidt35c24052018-01-12 13:37:15 +11001527 lbz r0, VCPU_XIVE_PUSHED(r9)
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001528 cmpwi cr0, r0, 0
1529 beq 1f
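	/*
	 * The TIMA has both a virtual-mode and a real-mode mapping;
	 * test MSR[DR] to pick the right one, and use cache-inhibited
	 * accesses (lwzcix/ldcix) when translation is off.
	 */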
1530 li r7, TM_SPC_PULL_OS_CTX
1531 li r6, TM_QW1_OS
1532 mfmsr r0
Benjamin Herrenschmidt2662efd2018-01-12 13:37:14 +11001533 andi. r0, r0, MSR_DR /* in real mode? */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001534 beq 2f
1535 ld r10, HSTATE_XIVE_TIMA_VIRT(r13)
1536 cmpldi cr0, r10, 0
1537 beq 1f
1538 /* First load pulls the context; the value is ignored */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001539 eieio
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001540 lwzx r11, r7, r10
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001541 /* Second load to recover the context state (Words 0 and 1) */
1542 ldx r11, r6, r10
1543 b 3f
15442: ld r10, HSTATE_XIVE_TIMA_PHYS(r13)
1545 cmpldi cr0, r10, 0
1546 beq 1f
1547 /* First load pulls the context; the value is ignored */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001548 eieio
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001549 lwzcix r11, r7, r10
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001550 /* Second load to recover the context state (Words 0 and 1) */
1551 ldcix r11, r6, r10
15523: std r11, VCPU_XIVE_SAVED_STATE(r9)
1553 /* Fixup some of the state for the next load */
1554 li r10, 0
1555 li r0, 0xff
Benjamin Herrenschmidt35c24052018-01-12 13:37:15 +11001556 stb r10, VCPU_XIVE_PUSHED(r9)
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10001557 stb r10, (VCPU_XIVE_SAVED_STATE+3)(r9)
1558 stb r0, (VCPU_XIVE_SAVED_STATE+4)(r9)
Benjamin Herrenschmidtad98dd12017-10-16 08:37:54 +11001559 eieio
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +100015601:
1561#endif /* CONFIG_KVM_XICS */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001562
Michael Ellerman345712c2019-11-13 21:05:44 +11001563 /* Possibly flush the link stack here. */
15641: nop
1565 patch_site 1b patch__call_kvm_flush_link_stack
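	/*
	 * The nop above is patched at boot to "bl kvm_flush_link_stack"
	 * on CPUs that need the link stack cleared on guest exit.
	 */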
1566
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001567 /* For hash guest, read the guest SLB and save it away */
1568 ld r5, VCPU_KVM(r9)
1569 lbz r0, KVM_RADIX(r5)
1570 li r5, 0
1571 cmpwi r0, 0
1572 bne 3f /* for radix, save 0 entries */
1573 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1574 mtctr r0
1575 li r6,0
1576 addi r7,r9,VCPU_SLB
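	/*
	 * Walk every SLB entry with slbmfee/slbmfev and keep only the
	 * valid ones.  The entry index is folded into the low bits of
	 * the saved ESID word so that slbmte can recreate the entry in
	 * the same slot on the next guest entry.
	 */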
15771: slbmfee r8,r6
1578 andis. r0,r8,SLB_ESID_V@h
1579 beq 2f
1580 add r8,r8,r6 /* put index in */
1581 slbmfev r3,r6
1582 std r8,VCPU_SLB_E(r7)
1583 std r3,VCPU_SLB_V(r7)
1584 addi r7,r7,VCPU_SLB_SIZE
1585 addi r5,r5,1
15862: addi r6,r6,1
1587 bdnz 1b
1588 /* Finally clear out the SLB */
1589 li r0,0
1590 slbmte r0,r0
1591 slbia
1592 ptesync
15933: stw r5,VCPU_SLB_MAX(r9)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001594
Paul Mackerrascda4a142018-03-22 09:48:54 +11001595 /* load host SLB entries */
1596BEGIN_MMU_FTR_SECTION
1597 b 0f
1598END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
1599 ld r8,PACA_SLBSHADOWPTR(r13)
1600
1601 .rept SLB_NUM_BOLTED
1602 li r3, SLBSHADOW_SAVEAREA
1603 LDX_BE r5, r8, r3
1604 addi r3, r3, 8
1605 LDX_BE r6, r8, r3
1606 andis. r7,r5,SLB_ESID_V@h
1607 beq 1f
1608 slbmte r6,r5
16091: addi r8,r8,16
1610 .endr
16110:
1612
Paul Mackerras6964e6a2018-01-11 14:51:02 +11001613guest_bypass:
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11001614 stw r12, STACK_SLOT_TRAP(r1)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001615
1616 /* Save DEC */
1617 /* Do this before kvmhv_commence_exit so we know TB is guest TB */
1618 ld r3, HSTATE_KVM_VCORE(r13)
1619 mfspr r5,SPRN_DEC
1620 mftb r6
1621 /* On P9, if the guest has large decr enabled, don't sign extend */
1622BEGIN_FTR_SECTION
1623 ld r4, VCORE_LPCR(r3)
1624 andis. r4, r4, LPCR_LD@h
1625 bne 16f
1626END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
1627 extsw r5,r5
162816: add r5,r5,r6
1629 /* r5 is a guest timebase value here, convert to host TB */
1630 ld r4,VCORE_TB_OFFSET_APPL(r3)
1631 subf r5,r4,r5
1632 std r5,VCPU_DEC_EXPIRES(r9)
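	/*
	 * VCPU_DEC_EXPIRES is kept in host timebase units: the guest
	 * sees TB + offset, so the guest-TB expiry (DEC + mftb) has the
	 * offset subtracted before being stored.
	 */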
1633
Paul Mackerras6af27c82015-03-28 14:21:10 +11001634 /* Increment exit count, poke other threads to exit */
Paul Mackerras57b8daa2018-04-20 22:51:11 +10001635 mr r3, r12
Paul Mackerras6af27c82015-03-28 14:21:10 +11001636 bl kvmhv_commence_exit
Paul Mackerraseddb60f2015-03-28 14:21:11 +11001637 nop
1638 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerras6af27c82015-03-28 14:21:10 +11001639
Paul Mackerrasec257162015-06-24 21:18:03 +10001640 /* Stop others sending VCPU interrupts to this physical CPU */
1641 li r0, -1
1642 stw r0, VCPU_CPU(r9)
1643 stw r0, VCPU_THREAD_CPU(r9)
1644
Paul Mackerrasde56a942011-06-29 00:21:34 +00001645 /* Save guest CTRL register, set runlatch to 1 */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001646 mfspr r6,SPRN_CTRLF
Paul Mackerrasde56a942011-06-29 00:21:34 +00001647 stw r6,VCPU_CTRL(r9)
1648 andi. r0,r6,1
1649 bne 4f
1650 ori r6,r6,1
1651 mtspr SPRN_CTRLT,r6
16524:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001653 /*
1654 * Save the guest PURR/SPURR
1655 */
1656 mfspr r5,SPRN_PURR
1657 mfspr r6,SPRN_SPURR
1658 ld r7,VCPU_PURR(r9)
1659 ld r8,VCPU_SPURR(r9)
1660 std r5,VCPU_PURR(r9)
1661 std r6,VCPU_SPURR(r9)
1662 subf r5,r7,r5
1663 subf r6,r8,r6
1664
1665 /*
1666 * Restore host PURR/SPURR and add guest times
1667 * so that the time in the guest gets accounted.
1668 */
1669 ld r3,HSTATE_PURR(r13)
1670 ld r4,HSTATE_SPURR(r13)
1671 add r3,r3,r5
1672 add r4,r4,r6
1673 mtspr SPRN_PURR,r3
1674 mtspr SPRN_SPURR,r4
1675
Michael Neulingb005255e2014-01-08 21:25:21 +11001676BEGIN_FTR_SECTION
1677 b 8f
1678END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Michael Neulingb005255e2014-01-08 21:25:21 +11001679 /* Save POWER8-specific registers */
1680 mfspr r5, SPRN_IAMR
1681 mfspr r6, SPRN_PSPB
1682 mfspr r7, SPRN_FSCR
1683 std r5, VCPU_IAMR(r9)
1684 stw r6, VCPU_PSPB(r9)
1685 std r7, VCPU_FSCR(r9)
1686 mfspr r5, SPRN_IC
Michael Neulingb005255e2014-01-08 21:25:21 +11001687 mfspr r7, SPRN_TAR
1688 std r5, VCPU_IC(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001689 std r7, VCPU_TAR(r9)
Michael Neuling7b490412014-01-08 21:25:32 +11001690 mfspr r8, SPRN_EBBHR
Michael Neulingb005255e2014-01-08 21:25:21 +11001691 std r8, VCPU_EBBHR(r9)
1692 mfspr r5, SPRN_EBBRR
1693 mfspr r6, SPRN_BESCR
Michael Neulingb005255e2014-01-08 21:25:21 +11001694 mfspr r7, SPRN_PID
1695 mfspr r8, SPRN_WORT
Paul Mackerras83677f52016-11-16 22:33:27 +11001696 std r5, VCPU_EBBRR(r9)
1697 std r6, VCPU_BESCR(r9)
Michael Neulingb005255e2014-01-08 21:25:21 +11001698 stw r7, VCPU_GUEST_PID(r9)
1699 std r8, VCPU_WORT(r9)
Paul Mackerras83677f52016-11-16 22:33:27 +11001700BEGIN_FTR_SECTION
1701 mfspr r5, SPRN_TCSCR
1702 mfspr r6, SPRN_ACOP
1703 mfspr r7, SPRN_CSIGR
1704 mfspr r8, SPRN_TACR
1705 std r5, VCPU_TCSCR(r9)
1706 std r6, VCPU_ACOP(r9)
1707 std r7, VCPU_CSIGR(r9)
1708 std r8, VCPU_TACR(r9)
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001709FTR_SECTION_ELSE
1710 mfspr r5, SPRN_TIDR
1711 mfspr r6, SPRN_PSSCR
1712 std r5, VCPU_TID(r9)
1713 rldicl r6, r6, 4, 50 /* r6 &= PSSCR_GUEST_VIS */
1714 rotldi r6, r6, 60
1715 std r6, VCPU_PSSCR(r9)
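	/*
	 * The rotate/clear/rotate above is equivalent to
	 * r6 &= 0xf0000000000003ff, keeping only the guest-visible
	 * PSSCR fields (the top nibble and the low 10 bits).
	 */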
Paul Mackerras769377f2017-02-15 14:30:17 +11001716 /* Restore host HFSCR value */
1717 ld r7, STACK_SLOT_HFSCR(r1)
1718 mtspr SPRN_HFSCR, r7
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001719ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerrasccec4452016-03-05 19:34:39 +11001720 /*
1721 * Restore various registers to 0, where non-zero values
1722 * set by the guest could disrupt the host.
1723 */
1724 li r0, 0
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001725 mtspr SPRN_PSPB, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001726 mtspr SPRN_WORT, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001727BEGIN_FTR_SECTION
1728 mtspr SPRN_TCSCR, r0
Paul Mackerrasccec4452016-03-05 19:34:39 +11001729 /* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
1730 li r0, 1
1731 sldi r0, r0, 31
1732 mtspr SPRN_MMCRS, r0
Paul Mackerras83677f52016-11-16 22:33:27 +11001733END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
Michael Neulingb005255e2014-01-08 21:25:21 +11001734
Michael Ellerman915c9d02019-02-22 13:22:08 +11001735 /* Save and restore AMR, IAMR and UAMOR before turning on the MMU */
1736 ld r8, STACK_SLOT_IAMR(r1)
1737 mtspr SPRN_IAMR, r8
1738
17398: /* Power7 jumps back in here */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001740 mfspr r5,SPRN_AMR
1741 mfspr r6,SPRN_UAMOR
1742 std r5,VCPU_AMR(r9)
1743 std r6,VCPU_UAMOR(r9)
Michael Ellerman915c9d02019-02-22 13:22:08 +11001744 ld r5,STACK_SLOT_AMR(r1)
1745 ld r6,STACK_SLOT_UAMOR(r1)
1746 mtspr SPRN_AMR, r5
Paul Mackerras4c3bb4c2017-06-15 15:43:17 +10001747 mtspr SPRN_UAMOR, r6
Paul Mackerrasde56a942011-06-29 00:21:34 +00001748
Paul Mackerrasde56a942011-06-29 00:21:34 +00001749 /* Switch DSCR back to host value */
1750 mfspr r8, SPRN_DSCR
1751 ld r7, HSTATE_DSCR(r13)
Paul Mackerrascfc86022013-09-21 09:53:28 +10001752 std r8, VCPU_DSCR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001753 mtspr SPRN_DSCR, r7
1754
1755 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001756 std r14, VCPU_GPR(R14)(r9)
1757 std r15, VCPU_GPR(R15)(r9)
1758 std r16, VCPU_GPR(R16)(r9)
1759 std r17, VCPU_GPR(R17)(r9)
1760 std r18, VCPU_GPR(R18)(r9)
1761 std r19, VCPU_GPR(R19)(r9)
1762 std r20, VCPU_GPR(R20)(r9)
1763 std r21, VCPU_GPR(R21)(r9)
1764 std r22, VCPU_GPR(R22)(r9)
1765 std r23, VCPU_GPR(R23)(r9)
1766 std r24, VCPU_GPR(R24)(r9)
1767 std r25, VCPU_GPR(R25)(r9)
1768 std r26, VCPU_GPR(R26)(r9)
1769 std r27, VCPU_GPR(R27)(r9)
1770 std r28, VCPU_GPR(R28)(r9)
1771 std r29, VCPU_GPR(R29)(r9)
1772 std r30, VCPU_GPR(R30)(r9)
1773 std r31, VCPU_GPR(R31)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001774
1775 /* Save SPRGs */
1776 mfspr r3, SPRN_SPRG0
1777 mfspr r4, SPRN_SPRG1
1778 mfspr r5, SPRN_SPRG2
1779 mfspr r6, SPRN_SPRG3
1780 std r3, VCPU_SPRG0(r9)
1781 std r4, VCPU_SPRG1(r9)
1782 std r5, VCPU_SPRG2(r9)
1783 std r6, VCPU_SPRG3(r9)
1784
Paul Mackerras89436332012-03-02 01:38:23 +00001785 /* save FP state */
1786 mr r3, r9
Paul Mackerras595e4f72013-10-15 20:43:04 +11001787 bl kvmppc_save_fp
Paul Mackerras89436332012-03-02 01:38:23 +00001788
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001789#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11001790/*
1791 * Branch around the call if both CPU_FTR_TM and
1792 * CPU_FTR_P9_TM_HV_ASSIST are off.
1793 */
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001794BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11001795 b 91f
1796END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10001797 /*
1798 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
1799 */
Simon Guo6f597c62018-05-23 15:01:48 +08001800 mr r3, r9
1801 ld r4, VCPU_MSR(r3)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10001802 bl kvmppc_save_tm_hv
Simon Guo6f597c62018-05-23 15:01:48 +08001803 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100180491:
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001805#endif
1806
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001807 /* Increment yield count if they have a VPA */
1808 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1809 cmpdi r8, 0
1810 beq 25f
Alexander Graf0865a582014-06-11 10:36:17 +02001811 li r4, LPPACA_YIELDCOUNT
1812 LWZX_BE r3, r8, r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001813 addi r3, r3, 1
Alexander Graf0865a582014-06-11 10:36:17 +02001814 STWX_BE r3, r8, r4
Paul Mackerrasc35635e2013-04-18 19:51:04 +00001815 li r3, 1
1816 stb r3, VCPU_VPA_DIRTY(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +0000181725:
1818 /* Save PMU registers if requested */
1819 /* r8 and cr0.eq are live here */
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001820BEGIN_FTR_SECTION
1821 /*
1822 * POWER8 seems to have a hardware bug where setting
1823 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
1824 * when some counters are already negative doesn't seem
1825 * to cause a performance monitor alert (and hence interrupt).
1826 * The effect of this is that when saving the PMU state,
1827 * if there is no PMU alert pending when we read MMCR0
1828 * before freezing the counters, but one becomes pending
1829 * before we read the counters, we lose it.
1830 * To work around this, we need a way to freeze the counters
1831 * before reading MMCR0. Normally, freezing the counters
1832 * is done by writing MMCR0 (to set MMCR0[FC]) which
1833 * unavoidably writes MMCR0[PMAO] as well. On POWER8,
1834 * we can also freeze the counters using MMCR2, by writing
1835 * 1s to all the counter freeze condition bits (there are
1836 * 9 bits each for 6 counters).
1837 */
1838 li r3, -1 /* set all freeze bits */
1839 clrrdi r3, r3, 10
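	/* r3 now has the top 54 bits set: 9 freeze bits x 6 counters */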
1840 mfspr r10, SPRN_MMCR2
1841 mtspr SPRN_MMCR2, r3
1842 isync
1843END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001844 li r3, 1
1845 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1846 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1847 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
Paul Mackerras89436332012-03-02 01:38:23 +00001848 mfspr r6, SPRN_MMCRA
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001849 /* Clear MMCRA in order to disable SDAR updates */
Paul Mackerras89436332012-03-02 01:38:23 +00001850 li r7, 0
1851 mtspr SPRN_MMCRA, r7
Paul Mackerrasde56a942011-06-29 00:21:34 +00001852 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001853 beq 21f /* if no VPA, save PMU stuff anyway */
1854 lbz r7, LPPACA_PMCINUSE(r8)
1855 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1856 bne 21f
1857 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1858 b 22f
185921: mfspr r5, SPRN_MMCR1
Paul Mackerras14941782013-09-06 13:11:18 +10001860 mfspr r7, SPRN_SIAR
1861 mfspr r8, SPRN_SDAR
Paul Mackerrasde56a942011-06-29 00:21:34 +00001862 std r4, VCPU_MMCR(r9)
1863 std r5, VCPU_MMCR + 8(r9)
1864 std r6, VCPU_MMCR + 16(r9)
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001865BEGIN_FTR_SECTION
1866 std r10, VCPU_MMCR + 24(r9)
1867END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerras14941782013-09-06 13:11:18 +10001868 std r7, VCPU_SIAR(r9)
1869 std r8, VCPU_SDAR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001870 mfspr r3, SPRN_PMC1
1871 mfspr r4, SPRN_PMC2
1872 mfspr r5, SPRN_PMC3
1873 mfspr r6, SPRN_PMC4
1874 mfspr r7, SPRN_PMC5
1875 mfspr r8, SPRN_PMC6
1876 stw r3, VCPU_PMC(r9)
1877 stw r4, VCPU_PMC + 4(r9)
1878 stw r5, VCPU_PMC + 8(r9)
1879 stw r6, VCPU_PMC + 12(r9)
1880 stw r7, VCPU_PMC + 16(r9)
1881 stw r8, VCPU_PMC + 20(r9)
Paul Mackerras9e368f22011-06-29 00:40:08 +00001882BEGIN_FTR_SECTION
Michael Neulingb005255e2014-01-08 21:25:21 +11001883 mfspr r5, SPRN_SIER
Paul Mackerras83677f52016-11-16 22:33:27 +11001884 std r5, VCPU_SIER(r9)
1885BEGIN_FTR_SECTION_NESTED(96)
Michael Neulingb005255e2014-01-08 21:25:21 +11001886 mfspr r6, SPRN_SPMC1
1887 mfspr r7, SPRN_SPMC2
1888 mfspr r8, SPRN_MMCRS
Michael Neulingb005255e2014-01-08 21:25:21 +11001889 stw r6, VCPU_PMC + 24(r9)
1890 stw r7, VCPU_PMC + 28(r9)
1891 std r8, VCPU_MMCR + 32(r9)
1892 lis r4, 0x8000
1893 mtspr SPRN_MMCRS, r4
Paul Mackerras83677f52016-11-16 22:33:27 +11001894END_FTR_SECTION_NESTED(CPU_FTR_ARCH_300, 0, 96)
Michael Neulingb005255e2014-01-08 21:25:21 +11001895END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000189622:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001897
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001898 /* Restore host values of some registers */
1899BEGIN_FTR_SECTION
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001900 ld r5, STACK_SLOT_CIABR(r1)
1901 ld r6, STACK_SLOT_DAWR(r1)
1902 ld r7, STACK_SLOT_DAWRX(r1)
1903 mtspr SPRN_CIABR, r5
Michael Neulingb53221e2018-03-27 15:37:22 +11001904 /*
1905 * If the DAWR doesn't work, it's ok to write these here as
1906 * the values should always be zero
1907 */
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10001908 mtspr SPRN_DAWR, r6
1909 mtspr SPRN_DAWRX, r7
1910END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1911BEGIN_FTR_SECTION
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001912 ld r5, STACK_SLOT_TID(r1)
1913 ld r6, STACK_SLOT_PSSCR(r1)
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001914 ld r7, STACK_SLOT_PID(r1)
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001915 mtspr SPRN_TIDR, r5
1916 mtspr SPRN_PSSCR, r6
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11001917 mtspr SPRN_PID, r7
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001918END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001919
1920#ifdef CONFIG_PPC_RADIX_MMU
1921 /*
1922 * Are we running hash or radix ?
1923 */
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10001924 ld r5, VCPU_KVM(r9)
1925 lbz r0, KVM_RADIX(r5)
1926 cmpwi cr2, r0, 0
Nicholas Piggin2bf10712018-07-05 18:47:00 +10001927 beq cr2, 2f
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001928
Paul Mackerrasdf158182018-05-17 14:47:59 +10001929 /*
1930 * Radix: do eieio; tlbsync; ptesync sequence in case we
1931 * interrupted the guest between a tlbie and a ptesync.
1932 */
1933 eieio
1934 tlbsync
1935 ptesync
1936
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001937 /* Radix: Handle the case where the guest used an illegal PID */
1938 LOAD_REG_ADDR(r4, mmu_base_pid)
1939 lwz r3, VCPU_GUEST_PID(r9)
1940 lwz r5, 0(r4)
1941 cmpw cr0,r3,r5
1942 blt 2f
1943
1944 /*
1945 * Illegal PID, the HW might have prefetched and cached in the TLB
1946 * some translations for the LPID 0 / guest PID combination which
1947 * Linux doesn't know about, so we need to flush that PID out of
1948 * the TLB. First we need to set LPIDR to 0 so tlbiel applies to
1949 * the right context.
1950 */
1951 li r0,0
1952 mtspr SPRN_LPID,r0
1953 isync
1954
1955 /* Then do a congruence class local flush */
1956 ld r6,VCPU_KVM(r9)
1957 lwz r0,KVM_TLB_SETS(r6)
1958 mtctr r0
1959 li r7,0x400 /* IS field = 0b01 */
1960 ptesync
1961 sldi r0,r3,32 /* RS has PID */
19621: PPC_TLBIEL(7,0,2,1,1) /* RIC=2, PRS=1, R=1 */
1963 addi r7,r7,0x1000
1964 bdnz 1b
1965 ptesync
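	/*
	 * tlbiel with RIC=2 flushes both the TLB and the page-walk
	 * cache for the guest PID (shifted into RS); the 0x1000 stride
	 * advances the set number in RB so each of the KVM_TLB_SETS
	 * congruence classes is invalidated on this thread.
	 */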
1966
Nicholas Piggin2bf10712018-07-05 18:47:00 +100019672:
Benjamin Herrenschmidta25bd722017-07-24 14:26:06 +10001968#endif /* CONFIG_PPC_RADIX_MMU */
Paul Mackerrase9cf1e02016-11-18 13:11:42 +11001969
Paul Mackerrasde56a942011-06-29 00:21:34 +00001970 /*
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001971 * POWER7/POWER8 guest -> host partition switch code.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001972 * We don't have to lock against tlbies but we do
1973 * have to coordinate the hardware threads.
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11001974 * Here STACK_SLOT_TRAP(r1) contains the trap number.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001975 */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001976kvmhv_switch_to_host:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001977 /* Secondary threads wait for primary to do partition switch */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001978 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001979 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1980 lbz r3,HSTATE_PTID(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001981 cmpwi r3,0
1982 beq 15f
1983 HMT_LOW
198413: lbz r3,VCORE_IN_GUEST(r5)
1985 cmpwi r3,0
1986 bne 13b
1987 HMT_MEDIUM
1988 b 16f
1989
1990 /* Primary thread waits for all the secondaries to exit guest */
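	/*
	 * entry_exit_map keeps the entry bitmap in the low byte and the
	 * exit bitmap in the next byte; spin until the two match, i.e.
	 * every thread that entered the guest has also exited.
	 */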
199115: lwz r3,VCORE_ENTRY_EXIT(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001992 rlwinm r0,r3,32-8,0xff
Paul Mackerrasde56a942011-06-29 00:21:34 +00001993 clrldi r3,r3,56
1994 cmpw r3,r0
1995 bne 15b
1996 isync
1997
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001998 /* Did we actually switch to the guest at all? */
1999 lbz r6, VCORE_IN_GUEST(r5)
2000 cmpwi r6, 0
2001 beq 19f
2002
Paul Mackerrasde56a942011-06-29 00:21:34 +00002003 /* Primary thread switches back to host partition */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002004 lwz r7,KVM_HOST_LPID(r4)
Paul Mackerras7a840842016-11-16 22:25:20 +11002005BEGIN_FTR_SECTION
2006 ld r6,KVM_HOST_SDR1(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002007 li r8,LPID_RSVD /* switch to reserved LPID */
2008 mtspr SPRN_LPID,r8
2009 ptesync
Paul Mackerras7a840842016-11-16 22:25:20 +11002010 mtspr SPRN_SDR1,r6 /* switch to host page table */
2011END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002012 mtspr SPRN_LPID,r7
2013 isync
2014
Michael Neulingb005255e2014-01-08 21:25:21 +11002015BEGIN_FTR_SECTION
Paul Mackerras88b02cf92016-09-15 13:42:52 +10002016 /* DPDES and VTB are shared between threads */
Michael Neulingb005255e2014-01-08 21:25:21 +11002017 mfspr r7, SPRN_DPDES
Paul Mackerras88b02cf92016-09-15 13:42:52 +10002018 mfspr r8, SPRN_VTB
Michael Neulingb005255e2014-01-08 21:25:21 +11002019 std r7, VCORE_DPDES(r5)
Paul Mackerras88b02cf92016-09-15 13:42:52 +10002020 std r8, VCORE_VTB(r5)
Michael Neulingb005255e2014-01-08 21:25:21 +11002021 /* clear DPDES so we don't get guest doorbells in the host */
2022 li r8, 0
2023 mtspr SPRN_DPDES, r8
2024END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
2025
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302026 /* If HMI, call kvmppc_realmode_hmi_handler() */
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11002027 lwz r12, STACK_SLOT_TRAP(r1)
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302028 cmpwi r12, BOOK3S_INTERRUPT_HMI
2029 bne 27f
2030 bl kvmppc_realmode_hmi_handler
2031 nop
Paul Mackerrasd0757452018-01-17 20:51:13 +11002032 cmpdi r3, 0
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302033 /*
Paul Mackerrasd0757452018-01-17 20:51:13 +11002034 * At this point kvmppc_realmode_hmi_handler may have resync-ed
2035 * the TB, and if it has, we must not subtract the guest timebase
2036 * offset from the timebase. So, skip it.
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302037 *
2038 * Also, do not call kvmppc_subcore_exit_guest() because it has
2039 * been invoked as part of kvmppc_realmode_hmi_handler().
2040 */
Paul Mackerrasd0757452018-01-17 20:51:13 +11002041 beq 30f
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302042
204327:
Paul Mackerrasde56a942011-06-29 00:21:34 +00002044 /* Subtract timebase offset from timebase */
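	/*
	 * mtspr TBU40 only replaces the upper 40 bits of the timebase.
	 * If the low 24 bits carried between the two mftb reads, the
	 * value just written is one unit (2^24) too small, so add 2^24
	 * to r8 (addis with 0x100) and write TBU40 again.
	 */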
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002045 ld r8, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002046 cmpdi r8,0
2047 beq 17f
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002048 li r0, 0
2049 std r0, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11002050 mftb r6 /* current guest timebase */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002051 subf r8,r8,r6
2052 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
2053 mftb r7 /* check if lower 24 bits overflowed */
2054 clrldi r6,r6,40
2055 clrldi r7,r7,40
2056 cmpld r7,r6
2057 bge 17f
2058 addis r8,r8,0x100 /* if so, increment upper 40 bits */
2059 mtspr SPRN_TBU40,r8
2060
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +0530206117: bl kvmppc_subcore_exit_guest
2062 nop
206330: ld r5,HSTATE_KVM_VCORE(r13)
2064 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
2065
Paul Mackerrasde56a942011-06-29 00:21:34 +00002066 /* Reset PCR */
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05302067 ld r0, VCORE_PCR(r5)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002068 cmpdi r0, 0
2069 beq 18f
2070 li r0, 0
2071 mtspr SPRN_PCR, r0
207218:
2073 /* Signal secondary CPUs to continue */
2074 stb r0,VCORE_IN_GUEST(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000207519: lis r8,0x7fff /* MAX_INT@h */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002076 mtspr SPRN_HDEC,r8
2077
Paul Mackerrasc0101502017-10-19 14:11:23 +1100207816:
2079BEGIN_FTR_SECTION
2080 /* On POWER9 with HPT-on-radix we need to wait for all other threads */
2081 ld r3, HSTATE_SPLIT_MODE(r13)
2082 cmpdi r3, 0
2083 beq 47f
2084 lwz r8, KVM_SPLIT_DO_RESTORE(r3)
2085 cmpwi r8, 0
2086 beq 47f
Paul Mackerrasc0101502017-10-19 14:11:23 +11002087 bl kvmhv_p9_restore_lpcr
2088 nop
Paul Mackerrasc0101502017-10-19 14:11:23 +11002089 b 48f
209047:
2091END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2092 ld r8,KVM_HOST_LPCR(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002093 mtspr SPRN_LPCR,r8
2094 isync
Paul Mackerrasc0101502017-10-19 14:11:23 +1100209548:
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002096#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2097 /* Finish timing, if we have a vcpu */
2098 ld r4, HSTATE_KVM_VCPU(r13)
2099 cmpdi r4, 0
2100 li r3, 0
2101 beq 2f
2102 bl kvmhv_accumulate_time
21032:
2104#endif
Paul Mackerrasde56a942011-06-29 00:21:34 +00002105 /* Unset guest mode */
2106 li r0, KVM_GUEST_MODE_NONE
2107 stb r0, HSTATE_IN_GUEST(r13)
2108
Paul Mackerrasa8b48a42018-03-07 22:17:20 +11002109 lwz r12, STACK_SLOT_TRAP(r1) /* return trap # in r12 */
Paul Mackerras7ceaa6d2017-06-16 11:53:19 +10002110 ld r0, SFS+PPC_LR_STKOFF(r1)
2111 addi r1, r1, SFS
Paul Mackerras218309b2013-09-06 13:23:44 +10002112 mtlr r0
2113 blr
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002114
Michael Ellerman345712c2019-11-13 21:05:44 +11002115.balign 32
2116.global kvm_flush_link_stack
2117kvm_flush_link_stack:
2118 /* Save LR into r0 */
2119 mflr r0
2120
2121 /* Flush the link stack. On Power8 it's up to 32 entries in size. */
2122 .rept 32
2123 bl .+4
2124 .endr
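	/*
	 * Each "bl .+4" pushes a return address onto the hardware link
	 * stack without branching anywhere else, so the .rept block
	 * overwrites any entries the guest may have trained.
	 */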
2125
2126 /* And on Power9 it's up to 64. */
2127BEGIN_FTR_SECTION
2128 .rept 32
2129 bl .+4
2130 .endr
2131END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2132
2133 /* Restore LR */
2134 mtlr r0
2135 blr
2136
2137
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002138#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
2139/*
2140 * Softpatch interrupt for transactional memory emulation cases
2141 * on POWER9 DD2.2. This is early in the guest exit path - we
2142 * haven't saved registers or done a treclaim yet.
2143 */
2144kvmppc_tm_emul:
2145 /* Save instruction image in HEIR */
2146 mfspr r3, SPRN_HEIR
2147 stw r3, VCPU_HEIR(r9)
2148
2149 /*
2150 * The cases we want to handle here are those where the guest
2151 * is in real suspend mode and is trying to transition to
2152 * transactional mode.
2153 */
2154 lbz r0, HSTATE_FAKE_SUSPEND(r13)
2155 cmpwi r0, 0 /* keep exiting guest if in fake suspend */
2156 bne guest_exit_cont
2157 rldicl r3, r11, 64 - MSR_TS_S_LG, 62
2158 cmpwi r3, 1 /* or if not in suspend state */
2159 bne guest_exit_cont
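	/*
	 * r3 now holds MSR[TS]: 0b01 means suspended, 0b10 means
	 * transactional, so only a guest in suspend state gets the
	 * emulation call below.
	 */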
2160
2161 /* Call C code to do the emulation */
2162 mr r3, r9
2163 bl kvmhv_p9_tm_emulation_early
2164 nop
2165 ld r9, HSTATE_KVM_VCPU(r13)
2166 li r12, BOOK3S_INTERRUPT_HV_SOFTPATCH
2167 cmpwi r3, 0
2168 beq guest_exit_cont /* continue exiting if not handled */
2169 ld r10, VCPU_PC(r9)
2170 ld r11, VCPU_MSR(r9)
2171 b fast_interrupt_c_return /* go back to guest if handled */
2172#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
2173
Paul Mackerras697d3892011-12-12 12:36:37 +00002174/*
2175 * Check whether an HDSI is an HPTE not found fault or something else.
2176 * If it is an HPTE not found fault that is due to the guest accessing
2177 * a page that it has mapped but which we have paged out, then
2178 * we continue on with the guest exit path. In all other cases,
2179 * reflect the HDSI to the guest as a DSI.
2180 */
2181kvmppc_hdsi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002182 ld r3, VCPU_KVM(r9)
2183 lbz r0, KVM_RADIX(r3)
Paul Mackerras697d3892011-12-12 12:36:37 +00002184 mfspr r4, SPRN_HDAR
2185 mfspr r6, SPRN_HDSISR
Michael Neulinge001fa72017-09-15 15:26:14 +10002186BEGIN_FTR_SECTION
2187 /* Look for DSISR canary. If we find it, retry instruction */
2188 cmpdi r6, 0x7fff
2189 beq 6f
2190END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
2191 cmpwi r0, 0
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002192 bne .Lradix_hdsi /* on radix, just save DAR/DSISR/ASDR */
Paul Mackerras4cf302b2011-12-12 12:38:51 +00002193 /* HPTE not found fault or protection fault? */
2194 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
Paul Mackerras697d3892011-12-12 12:36:37 +00002195 beq 1f /* if not, send it to the guest */
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11002196 andi. r0, r11, MSR_DR /* data relocation enabled? */
2197 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11002198BEGIN_FTR_SECTION
2199 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2200 b 4f
2201END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras697d3892011-12-12 12:36:37 +00002202 clrrdi r0, r4, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00002203 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11002204 li r0, BOOK3S_INTERRUPT_DATA_SEGMENT
2205 bne 7f /* if no SLB entry found */
Paul Mackerras697d3892011-12-12 12:36:37 +000022064: std r4, VCPU_FAULT_DAR(r9)
2207 stw r6, VCPU_FAULT_DSISR(r9)
2208
2209 /* Search the hash table. */
2210 mr r3, r9 /* vcpu pointer */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002211 li r7, 1 /* data fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002212 bl kvmppc_hpte_hv_fault
Paul Mackerras697d3892011-12-12 12:36:37 +00002213 ld r9, HSTATE_KVM_VCPU(r13)
2214 ld r10, VCPU_PC(r9)
2215 ld r11, VCPU_MSR(r9)
2216 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
2217 cmpdi r3, 0 /* retry the instruction */
2218 beq 6f
2219 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002220 beq guest_exit_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00002221 cmpdi r3, -2 /* MMIO emulation; need instr word */
2222 beq 2f
2223
Paul Mackerrascf29b212015-10-27 16:10:20 +11002224 /* Synthesize a DSI (or DSegI) for the guest */
Paul Mackerras697d3892011-12-12 12:36:37 +00002225 ld r4, VCPU_FAULT_DAR(r9)
2226 mr r6, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110022271: li r0, BOOK3S_INTERRUPT_DATA_STORAGE
Paul Mackerras697d3892011-12-12 12:36:37 +00002228 mtspr SPRN_DSISR, r6
Paul Mackerrascf29b212015-10-27 16:10:20 +110022297: mtspr SPRN_DAR, r4
Paul Mackerras697d3892011-12-12 12:36:37 +00002230 mtspr SPRN_SRR0, r10
2231 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11002232 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11002233 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002234fast_interrupt_c_return:
Paul Mackerras697d3892011-12-12 12:36:37 +000022356: ld r7, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10002236 ld r8, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00002237 mtctr r7
2238 mtxer r8
2239 mr r4, r9
2240 b fast_guest_return
2241
22423: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
2243 ld r5, KVM_VRMA_SLB_V(r5)
2244 b 4b
2245
2246 /* If this is for emulated MMIO, load the instruction word */
22472: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
2248
2249 /* Set guest mode to 'jump over instruction' so if lwz faults
2250 * we'll just continue at the next IP. */
2251 li r0, KVM_GUEST_MODE_SKIP
2252 stb r0, HSTATE_IN_GUEST(r13)
2253
2254 /* Do the access with MSR:DR enabled */
2255 mfmsr r3
2256 ori r4, r3, MSR_DR /* Enable paging for data */
2257 mtmsrd r4
2258 lwz r8, 0(r10)
2259 mtmsrd r3
2260
2261 /* Store the result */
2262 stw r8, VCPU_LAST_INST(r9)
2263
2264 /* Unset guest mode. */
Paul Mackerras44a3add2013-10-04 21:45:04 +10002265 li r0, KVM_GUEST_MODE_HOST_HV
Paul Mackerras697d3892011-12-12 12:36:37 +00002266 stb r0, HSTATE_IN_GUEST(r13)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002267 b guest_exit_cont
Paul Mackerrasde56a942011-06-29 00:21:34 +00002268
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002269.Lradix_hdsi:
2270 std r4, VCPU_FAULT_DAR(r9)
2271 stw r6, VCPU_FAULT_DSISR(r9)
2272.Lradix_hisi:
2273 mfspr r5, SPRN_ASDR
2274 std r5, VCPU_FAULT_GPA(r9)
2275 b guest_exit_cont
2276
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002277/*
Paul Mackerras342d3db2011-12-12 12:38:05 +00002278 * Similarly for an HISI, reflect it to the guest as an ISI unless
2279 * it is an HPTE not found fault for a page that we have paged out.
2280 */
2281kvmppc_hisi:
Paul Mackerrasf4c51f82017-01-30 21:21:45 +11002282 ld r3, VCPU_KVM(r9)
2283 lbz r0, KVM_RADIX(r3)
2284 cmpwi r0, 0
2285 bne .Lradix_hisi /* for radix, just save ASDR */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002286 andis. r0, r11, SRR1_ISI_NOPT@h
2287 beq 1f
Paul Mackerras4e5acdc2017-02-28 11:05:47 +11002288 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
2289 beq 3f
Paul Mackerrasef8c6402017-01-30 21:21:43 +11002290BEGIN_FTR_SECTION
2291 mfspr r5, SPRN_ASDR /* on POWER9, use ASDR to get VSID */
2292 b 4f
2293END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras342d3db2011-12-12 12:38:05 +00002294 clrrdi r0, r10, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00002295 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerrascf29b212015-10-27 16:10:20 +11002296 li r0, BOOK3S_INTERRUPT_INST_SEGMENT
2297 bne 7f /* if no SLB entry found */
Paul Mackerras342d3db2011-12-12 12:38:05 +000022984:
2299 /* Search the hash table. */
2300 mr r3, r9 /* vcpu pointer */
2301 mr r4, r10
2302 mr r6, r11
2303 li r7, 0 /* instruction fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002304 bl kvmppc_hpte_hv_fault
Paul Mackerras342d3db2011-12-12 12:38:05 +00002305 ld r9, HSTATE_KVM_VCPU(r13)
2306 ld r10, VCPU_PC(r9)
2307 ld r11, VCPU_MSR(r9)
2308 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
2309 cmpdi r3, 0 /* retry the instruction */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002310 beq fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002311 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002312 beq guest_exit_cont
Paul Mackerras342d3db2011-12-12 12:38:05 +00002313
Paul Mackerrascf29b212015-10-27 16:10:20 +11002314 /* Synthesize an ISI (or ISegI) for the guest */
Paul Mackerras342d3db2011-12-12 12:38:05 +00002315 mr r11, r3
Paul Mackerrascf29b212015-10-27 16:10:20 +110023161: li r0, BOOK3S_INTERRUPT_INST_STORAGE
23177: mtspr SPRN_SRR0, r10
Paul Mackerras342d3db2011-12-12 12:38:05 +00002318 mtspr SPRN_SRR1, r11
Paul Mackerrascf29b212015-10-27 16:10:20 +11002319 mr r10, r0
Michael Neulinge4e38122014-03-25 10:47:02 +11002320 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002321 b fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00002322
23233: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
2324 ld r5, KVM_VRMA_SLB_V(r6)
2325 b 4b
2326
2327/*
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002328 * Try to handle an hcall in real mode.
2329 * Returns to the guest if we handle it, or continues on up to
2330 * the kernel if we can't (i.e. if we don't have a handler for
2331 * it, or if the handler returns H_TOO_HARD).
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002332 *
2333 * r5 - r8 contain hcall args,
2334 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002335 */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002336hcall_try_real_mode:
Michael Neulingc75df6f2012-06-25 13:33:10 +00002337 ld r3,VCPU_GPR(R3)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002338 andi. r0,r11,MSR_PR
Liu Ping Fan27025a62013-11-19 14:12:48 +08002339 /* sc 1 from userspace - reflect to guest syscall */
2340 bne sc_1_fast_return
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002341 clrrdi r3,r3,2
2342 cmpldi r3,hcall_real_table_end - hcall_real_table
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002343 bge guest_exit_cont
Paul Mackerras699a0ea2014-06-02 11:02:59 +10002344 /* See if this hcall is enabled for in-kernel handling */
2345 ld r4, VCPU_KVM(r9)
2346 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
2347 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
2348 add r4, r4, r0
2349 ld r0, KVM_ENABLED_HCALLS(r4)
2350 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
2351 srd r0, r0, r4
2352 andi. r0, r0, 1
2353 beq guest_exit_cont
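	/*
	 * Worked example: H_CEDE (0xe0) yields index 0xe0/4 = 56, so we
	 * just tested bit 56 of enabled_hcalls[0]; below, the hcall
	 * number itself is the byte offset into hcall_real_table, since
	 * each entry is a 32-bit offset and hcalls come in steps of 4.
	 */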
2354 /* Get pointer to handler, if any, and call it */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002355 LOAD_REG_ADDR(r4, hcall_real_table)
Paul Mackerras4baa1d82013-07-08 20:09:53 +10002356 lwax r3,r3,r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002357 cmpwi r3,0
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002358 beq guest_exit_cont
Anton Blanchard05a308c2014-06-12 18:16:10 +10002359 add r12,r3,r4
2360 mtctr r12
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002361 mr r3,r9 /* get vcpu pointer */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002362 ld r4,VCPU_GPR(R4)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002363 bctrl
2364 cmpdi r3,H_TOO_HARD
2365 beq hcall_real_fallback
2366 ld r4,HSTATE_KVM_VCPU(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00002367 std r3,VCPU_GPR(R3)(r4)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002368 ld r10,VCPU_PC(r4)
2369 ld r11,VCPU_MSR(r4)
2370 b fast_guest_return
2371
Liu Ping Fan27025a62013-11-19 14:12:48 +08002372sc_1_fast_return:
2373 mtspr SPRN_SRR0,r10
2374 mtspr SPRN_SRR1,r11
2375 li r10, BOOK3S_INTERRUPT_SYSCALL
Michael Neulinge4e38122014-03-25 10:47:02 +11002376 bl kvmppc_msr_interrupt
Liu Ping Fan27025a62013-11-19 14:12:48 +08002377 mr r4,r9
2378 b fast_guest_return
2379
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002380 /* We've attempted a real mode hcall, but the handler has punted it back
2381 * to userspace. We need to restore some clobbered volatiles
2382 * before resuming the pass-it-to-qemu path */
2383hcall_real_fallback:
2384 li r12,BOOK3S_INTERRUPT_SYSCALL
2385 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002386
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002387 b guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002388
2389 .globl hcall_real_table
2390hcall_real_table:
2391 .long 0 /* 0 - unused */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002392 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
2393 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
2394 .long DOTSYM(kvmppc_h_read) - hcall_real_table
Paul Mackerrascdeee512015-06-24 21:18:07 +10002395 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
2396 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002397 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
2398 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11002399 .long DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002400 .long 0 /* 0x24 - H_SET_SPRG0 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002401 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002402 .long 0 /* 0x2c */
2403 .long 0 /* 0x30 */
2404 .long 0 /* 0x34 */
2405 .long 0 /* 0x38 */
2406 .long 0 /* 0x3c */
2407 .long 0 /* 0x40 */
2408 .long 0 /* 0x44 */
2409 .long 0 /* 0x48 */
2410 .long 0 /* 0x4c */
2411 .long 0 /* 0x50 */
2412 .long 0 /* 0x54 */
2413 .long 0 /* 0x58 */
2414 .long 0 /* 0x5c */
2415 .long 0 /* 0x60 */
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00002416#ifdef CONFIG_KVM_XICS
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002417 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
2418 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
2419 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10002420 .long DOTSYM(kvmppc_rm_h_ipoll) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002421 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00002422#else
2423 .long 0 /* 0x64 - H_EOI */
2424 .long 0 /* 0x68 - H_CPPR */
2425 .long 0 /* 0x6c - H_IPI */
2426 .long 0 /* 0x70 - H_IPOLL */
2427 .long 0 /* 0x74 - H_XIRR */
2428#endif
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002429 .long 0 /* 0x78 */
2430 .long 0 /* 0x7c */
2431 .long 0 /* 0x80 */
2432 .long 0 /* 0x84 */
2433 .long 0 /* 0x88 */
2434 .long 0 /* 0x8c */
2435 .long 0 /* 0x90 */
2436 .long 0 /* 0x94 */
2437 .long 0 /* 0x98 */
2438 .long 0 /* 0x9c */
2439 .long 0 /* 0xa0 */
2440 .long 0 /* 0xa4 */
2441 .long 0 /* 0xa8 */
2442 .long 0 /* 0xac */
2443 .long 0 /* 0xb0 */
2444 .long 0 /* 0xb4 */
2445 .long 0 /* 0xb8 */
2446 .long 0 /* 0xbc */
2447 .long 0 /* 0xc0 */
2448 .long 0 /* 0xc4 */
2449 .long 0 /* 0xc8 */
2450 .long 0 /* 0xcc */
2451 .long 0 /* 0xd0 */
2452 .long 0 /* 0xd4 */
2453 .long 0 /* 0xd8 */
2454 .long 0 /* 0xdc */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002455 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
Sam Bobroff90fd09f2014-12-03 13:30:40 +11002456 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002457 .long 0 /* 0xe8 */
2458 .long 0 /* 0xec */
2459 .long 0 /* 0xf0 */
2460 .long 0 /* 0xf4 */
2461 .long 0 /* 0xf8 */
2462 .long 0 /* 0xfc */
2463 .long 0 /* 0x100 */
2464 .long 0 /* 0x104 */
2465 .long 0 /* 0x108 */
2466 .long 0 /* 0x10c */
2467 .long 0 /* 0x110 */
2468 .long 0 /* 0x114 */
2469 .long 0 /* 0x118 */
2470 .long 0 /* 0x11c */
2471 .long 0 /* 0x120 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002472 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
Paul Mackerras8563bf52014-01-08 21:25:29 +11002473 .long 0 /* 0x128 */
2474 .long 0 /* 0x12c */
2475 .long 0 /* 0x130 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11002476 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
Alexey Kardashevskiy31217db2016-03-18 13:50:42 +11002477 .long DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
Alexey Kardashevskiyd3695aa2016-02-15 12:55:09 +11002478 .long DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
Michael Ellermane928e9c2015-03-20 20:39:41 +11002479 .long 0 /* 0x140 */
2480 .long 0 /* 0x144 */
2481 .long 0 /* 0x148 */
2482 .long 0 /* 0x14c */
2483 .long 0 /* 0x150 */
2484 .long 0 /* 0x154 */
2485 .long 0 /* 0x158 */
2486 .long 0 /* 0x15c */
2487 .long 0 /* 0x160 */
2488 .long 0 /* 0x164 */
2489 .long 0 /* 0x168 */
2490 .long 0 /* 0x16c */
2491 .long 0 /* 0x170 */
2492 .long 0 /* 0x174 */
2493 .long 0 /* 0x178 */
2494 .long 0 /* 0x17c */
2495 .long 0 /* 0x180 */
2496 .long 0 /* 0x184 */
2497 .long 0 /* 0x188 */
2498 .long 0 /* 0x18c */
2499 .long 0 /* 0x190 */
2500 .long 0 /* 0x194 */
2501 .long 0 /* 0x198 */
2502 .long 0 /* 0x19c */
2503 .long 0 /* 0x1a0 */
2504 .long 0 /* 0x1a4 */
2505 .long 0 /* 0x1a8 */
2506 .long 0 /* 0x1ac */
2507 .long 0 /* 0x1b0 */
2508 .long 0 /* 0x1b4 */
2509 .long 0 /* 0x1b8 */
2510 .long 0 /* 0x1bc */
2511 .long 0 /* 0x1c0 */
2512 .long 0 /* 0x1c4 */
2513 .long 0 /* 0x1c8 */
2514 .long 0 /* 0x1cc */
2515 .long 0 /* 0x1d0 */
2516 .long 0 /* 0x1d4 */
2517 .long 0 /* 0x1d8 */
2518 .long 0 /* 0x1dc */
2519 .long 0 /* 0x1e0 */
2520 .long 0 /* 0x1e4 */
2521 .long 0 /* 0x1e8 */
2522 .long 0 /* 0x1ec */
2523 .long 0 /* 0x1f0 */
2524 .long 0 /* 0x1f4 */
2525 .long 0 /* 0x1f8 */
2526 .long 0 /* 0x1fc */
2527 .long 0 /* 0x200 */
2528 .long 0 /* 0x204 */
2529 .long 0 /* 0x208 */
2530 .long 0 /* 0x20c */
2531 .long 0 /* 0x210 */
2532 .long 0 /* 0x214 */
2533 .long 0 /* 0x218 */
2534 .long 0 /* 0x21c */
2535 .long 0 /* 0x220 */
2536 .long 0 /* 0x224 */
2537 .long 0 /* 0x228 */
2538 .long 0 /* 0x22c */
2539 .long 0 /* 0x230 */
2540 .long 0 /* 0x234 */
2541 .long 0 /* 0x238 */
2542 .long 0 /* 0x23c */
2543 .long 0 /* 0x240 */
2544 .long 0 /* 0x244 */
2545 .long 0 /* 0x248 */
2546 .long 0 /* 0x24c */
2547 .long 0 /* 0x250 */
2548 .long 0 /* 0x254 */
2549 .long 0 /* 0x258 */
2550 .long 0 /* 0x25c */
2551 .long 0 /* 0x260 */
2552 .long 0 /* 0x264 */
2553 .long 0 /* 0x268 */
2554 .long 0 /* 0x26c */
2555 .long 0 /* 0x270 */
2556 .long 0 /* 0x274 */
2557 .long 0 /* 0x278 */
2558 .long 0 /* 0x27c */
2559 .long 0 /* 0x280 */
2560 .long 0 /* 0x284 */
2561 .long 0 /* 0x288 */
2562 .long 0 /* 0x28c */
2563 .long 0 /* 0x290 */
2564 .long 0 /* 0x294 */
2565 .long 0 /* 0x298 */
2566 .long 0 /* 0x29c */
2567 .long 0 /* 0x2a0 */
2568 .long 0 /* 0x2a4 */
2569 .long 0 /* 0x2a8 */
2570 .long 0 /* 0x2ac */
2571 .long 0 /* 0x2b0 */
2572 .long 0 /* 0x2b4 */
2573 .long 0 /* 0x2b8 */
2574 .long 0 /* 0x2bc */
2575 .long 0 /* 0x2c0 */
2576 .long 0 /* 0x2c4 */
2577 .long 0 /* 0x2c8 */
2578 .long 0 /* 0x2cc */
2579 .long 0 /* 0x2d0 */
2580 .long 0 /* 0x2d4 */
2581 .long 0 /* 0x2d8 */
2582 .long 0 /* 0x2dc */
2583 .long 0 /* 0x2e0 */
2584 .long 0 /* 0x2e4 */
2585 .long 0 /* 0x2e8 */
2586 .long 0 /* 0x2ec */
2587 .long 0 /* 0x2f0 */
2588 .long 0 /* 0x2f4 */
2589 .long 0 /* 0x2f8 */
Benjamin Herrenschmidt5af50992017-04-05 17:54:56 +10002590#ifdef CONFIG_KVM_XICS
2591 .long DOTSYM(kvmppc_rm_h_xirr_x) - hcall_real_table
2592#else
2593 .long 0 /* 0x2fc - H_XIRR_X*/
2594#endif
Michael Ellermane928e9c2015-03-20 20:39:41 +11002595 .long DOTSYM(kvmppc_h_random) - hcall_real_table
Paul Mackerrasae2113a2014-06-02 11:03:00 +10002596 .globl hcall_real_table_end
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002597hcall_real_table_end:
2598
Paul Mackerras8563bf52014-01-08 21:25:29 +11002599_GLOBAL(kvmppc_h_set_xdabr)
2600 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2601 beq 6f
2602 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2603 andc. r0, r5, r0
2604 beq 3f
26056: li r3, H_PARAMETER
2606 blr
2607
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002608_GLOBAL(kvmppc_h_set_dabr)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002609 li r5, DABRX_USER | DABRX_KERNEL
26103:
Michael Neulingeee7ff92014-01-08 21:25:19 +11002611BEGIN_FTR_SECTION
2612 b 2f
2613END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002614 std r4,VCPU_DABR(r3)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002615 stw r5, VCPU_DABRX(r3)
2616 mtspr SPRN_DABRX, r5
Paul Mackerras89436332012-03-02 01:38:23 +00002617 /* Work around P7 bug where DABR can get corrupted on mtspr */
26181: mtspr SPRN_DABR,r4
2619 mfspr r5, SPRN_DABR
2620 cmpd r4, r5
2621 bne 1b
2622 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002623 li r3,0
2624 blr
2625
Michael Neulinge8ebedb2018-03-27 15:37:21 +110026262:
2627BEGIN_FTR_SECTION
2628 /* POWER9 with disabled DAWR */
Aneesh Kumar K.Vca9a16c2018-03-30 17:27:24 +05302629 li r3, H_HARDWARE
Michael Neulinge8ebedb2018-03-27 15:37:21 +11002630 blr
2631END_FTR_SECTION_IFCLR(CPU_FTR_DAWR)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002632 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
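	/*
	 * The two rlwimi ops transpose the DABR's low-order flag bits
	 * (BT/DW/DR) into their DAWRX equivalents (WT/DW/DR), and the
	 * clrrdi strips those 3 bits off to form the DAWR address.
	 */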
Michael Neulinge8ebedb2018-03-27 15:37:21 +11002633 rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
Thomas Huth760a7362015-11-20 09:11:45 +01002634 rlwimi r5, r4, 2, DAWRX_WT
Paul Mackerras8563bf52014-01-08 21:25:29 +11002635 clrrdi r4, r4, 3
2636 std r4, VCPU_DAWR(r3)
2637 std r5, VCPU_DAWRX(r3)
2638 mtspr SPRN_DAWR, r4
2639 mtspr SPRN_DAWRX, r5
2640 li r3, 0
Paul Mackerrasde56a942011-06-29 00:21:34 +00002641 blr
2642
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002643_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002644 ori r11,r11,MSR_EE
2645 std r11,VCPU_MSR(r3)
2646 li r0,1
2647 stb r0,VCPU_CEDED(r3)
2648 sync /* order setting ceded vs. testing prodded */
2649 lbz r5,VCPU_PRODDED(r3)
2650 cmpwi r5,0
Paul Mackerras04f995a2012-08-06 00:03:28 +00002651 bne kvm_cede_prodded
Paul Mackerras6af27c82015-03-28 14:21:10 +11002652 li r12,0 /* set trap to 0 to say hcall is handled */
2653 stw r12,VCPU_TRAP(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002654 li r0,H_SUCCESS
Michael Neulingc75df6f2012-06-25 13:33:10 +00002655 std r0,VCPU_GPR(R3)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002656
2657 /*
2658 * Set our bit in the bitmask of napping threads unless all the
2659 * other threads are already napping, in which case we send this
2660 * up to the host.
2661 */
2662 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002663 lbz r6,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002664 lwz r8,VCORE_ENTRY_EXIT(r5)
2665 clrldi r8,r8,56
2666 li r0,1
2667 sld r0,r0,r6
2668 addi r6,r5,VCORE_NAPPING_THREADS
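	/*
	 * Atomically set our bit in napping_threads.  If the result
	 * equals the entry bitmap in r8, every thread that entered the
	 * guest is now ceded, so give the cede up to the host instead.
	 */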
266931: lwarx r4,0,r6
2670 or r4,r4,r0
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002671 cmpw r4,r8
2672 beq kvm_cede_exit
Paul Mackerras19ccb762011-07-23 17:42:46 +10002673 stwcx. r4,0,r6
2674 bne 31b
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002675 /* order napping_threads update vs testing entry_exit_map */
Paul Mackerrasf019b7a2013-11-16 17:46:03 +11002676 isync
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002677 li r0,NAPPING_CEDE
Paul Mackerras19ccb762011-07-23 17:42:46 +10002678 stb r0,HSTATE_NAPPING(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002679 lwz r7,VCORE_ENTRY_EXIT(r5)
2680 cmpwi r7,0x100
2681 bge 33f /* another thread already exiting */
2682
2683/*
2684 * Although not specifically required by the architecture, POWER7
2685 * preserves the following registers in nap mode, even if an SMT mode
2686 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2687 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2688 */
2689 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002690 std r14, VCPU_GPR(R14)(r3)
2691 std r15, VCPU_GPR(R15)(r3)
2692 std r16, VCPU_GPR(R16)(r3)
2693 std r17, VCPU_GPR(R17)(r3)
2694 std r18, VCPU_GPR(R18)(r3)
2695 std r19, VCPU_GPR(R19)(r3)
2696 std r20, VCPU_GPR(R20)(r3)
2697 std r21, VCPU_GPR(R21)(r3)
2698 std r22, VCPU_GPR(R22)(r3)
2699 std r23, VCPU_GPR(R23)(r3)
2700 std r24, VCPU_GPR(R24)(r3)
2701 std r25, VCPU_GPR(R25)(r3)
2702 std r26, VCPU_GPR(R26)(r3)
2703 std r27, VCPU_GPR(R27)(r3)
2704 std r28, VCPU_GPR(R28)(r3)
2705 std r29, VCPU_GPR(R29)(r3)
2706 std r30, VCPU_GPR(R30)(r3)
2707 std r31, VCPU_GPR(R31)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002708
2709 /* save FP state */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002710 bl kvmppc_save_fp
Paul Mackerras19ccb762011-07-23 17:42:46 +10002711
Paul Mackerras93d17392016-06-22 15:52:55 +10002712#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002713/*
2714 * Branch around the call if both CPU_FTR_TM and
2715 * CPU_FTR_P9_TM_HV_ASSIST are off.
2716 */
Paul Mackerras93d17392016-06-22 15:52:55 +10002717BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002718 b 91f
2719END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002720 /*
2721 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2722 */
Simon Guo6f597c62018-05-23 15:01:48 +08002723 ld r3, HSTATE_KVM_VCPU(r13)
2724 ld r4, VCPU_MSR(r3)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002725 bl kvmppc_save_tm_hv
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100272691:
Paul Mackerras93d17392016-06-22 15:52:55 +10002727#endif
2728
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002729 /*
2730 * Set DEC to the smaller of DEC and HDEC, so that we wake
2731 * no later than the end of our timeslice (HDEC interrupts
2732 * don't wake us from nap).
2733 */
2734 mfspr r3, SPRN_DEC
2735 mfspr r4, SPRN_HDEC
2736 mftb r5
Paul Mackerras1bc3fe82017-05-22 16:55:16 +10002737BEGIN_FTR_SECTION
2738 /* On P9 check whether the guest has large decrementer mode enabled */
2739 ld r6, HSTATE_KVM_VCORE(r13)
2740 ld r6, VCORE_LPCR(r6)
2741 andis. r6, r6, LPCR_LD@h
2742 bne 68f
2743END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras2f272462017-05-22 16:25:14 +10002744 extsw r3, r3
Paul Mackerras1bc3fe82017-05-22 16:55:16 +1000274568: EXTEND_HDEC(r4)
Paul Mackerras2f272462017-05-22 16:25:14 +10002746 cmpd r3, r4
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002747 ble 67f
2748 mtspr SPRN_DEC, r4
274967:
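	/*
	 * The clamp above, in C terms (both values sign-extended where
	 * the CPU lacks the large decrementer; sketch only):
	 *
	 *	if (hdec < dec)
	 *		mtspr(SPRN_DEC, hdec);	// wake by end of our timeslice
	 */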
2750 /* save expiry time of guest decrementer */
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002751 add r3, r3, r5
2752 ld r4, HSTATE_KVM_VCPU(r13)
2753 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002754 ld r6, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002755 subf r3, r6, r3 /* convert to host TB value */
2756 std r3, VCPU_DEC_EXPIRES(r4)
2757
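	/*
	 * Expiry bookkeeping above, as a sketch (dec is the guest DEC
	 * value read before the clamp; field names per the asm offsets):
	 *
	 *	vcpu->arch.dec_expires = dec + tb - vc->tb_offset_applied;
	 */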
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002758#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2759 ld r4, HSTATE_KVM_VCPU(r13)
2760 addi r3, r4, VCPU_TB_CEDE
2761 bl kvmhv_accumulate_time
2762#endif
2763
Paul Mackerrasccc07772015-03-28 14:21:07 +11002764 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2765
Paul Mackerras19ccb762011-07-23 17:42:46 +10002766 /*
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002767 * Take a nap until a decrementer, external or doorbell interrupt
Paul Mackerrasccc07772015-03-28 14:21:07 +11002768 * occurs, with PECE1 and PECE0 set in LPCR.
Paul Mackerras66feed62015-03-28 14:21:12 +11002769 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
Paul Mackerrasccc07772015-03-28 14:21:07 +11002770 * Also clear the runlatch bit before napping.
Paul Mackerras19ccb762011-07-23 17:42:46 +10002771 */
Paul Mackerras56548fc2014-12-03 14:48:40 +11002772kvm_do_nap:
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002773 mfspr r0, SPRN_CTRLF
2774 clrrdi r0, r0, 1
2775 mtspr SPRN_CTRLT, r0
Preeti U Murthy582b9102014-04-11 16:02:08 +05302776
Paul Mackerrasf0888f72012-02-03 00:54:17 +00002777 li r0,1
2778 stb r0,HSTATE_HWTHREAD_REQ(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002779 mfspr r5,SPRN_LPCR
2780 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002781BEGIN_FTR_SECTION
Paul Mackerras66feed62015-03-28 14:21:12 +11002782 ori r5, r5, LPCR_PECEDH
Paul Mackerrasccc07772015-03-28 14:21:07 +11002783 rlwimi r5, r3, 0, LPCR_PECEDP
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002784END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002785
2786kvm_nap_sequence: /* desired LPCR value in r5 */
2787BEGIN_FTR_SECTION
2788 /*
2789 * PSSCR bits: exit criterion = 1 (wakeup based on LPCR at sreset)
2790 * enable state loss = 1 (allow SMT mode switch)
2791 * requested level = 0 (just stop dispatching)
2792 */
2793 lis r3, (PSSCR_EC | PSSCR_ESL)@h
2794 mtspr SPRN_PSSCR, r3
2795 /* Set LPCR_PECE_HVEE bit to enable wakeup by HV interrupts */
2796 li r4, LPCR_PECE_HVEE@higher
2797 sldi r4, r4, 32
2798 or r5, r5, r4
2799END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002800 mtspr SPRN_LPCR,r5
2801 isync
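	/*
	 * Flush pending stores before napping: store to a scratch word,
	 * ptesync, then load it back and spin on an always-false compare
	 * so the load must complete before the nap/stop below is issued.
	 */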
2802 li r0, 0
2803 std r0, HSTATE_SCRATCH0(r13)
2804 ptesync
2805 ld r0, HSTATE_SCRATCH0(r13)
28061: cmpd r0, r0
2807 bne 1b
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002808BEGIN_FTR_SECTION
Paul Mackerras19ccb762011-07-23 17:42:46 +10002809 nap
Paul Mackerrasbf53c882016-11-18 14:34:07 +11002810FTR_SECTION_ELSE
2811 PPC_STOP
2812ALT_FTR_SECTION_END_IFCLR(CPU_FTR_ARCH_300)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002813 b .
2814
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100281533: mr r4, r3
2816 li r3, 0
2817 li r12, 0
2818 b 34f
2819
Paul Mackerras19ccb762011-07-23 17:42:46 +10002820kvm_end_cede:
Paul Mackerras4619ac82013-04-17 20:31:41 +00002821 /* get vcpu pointer */
2822 ld r4, HSTATE_KVM_VCPU(r13)
2823
Paul Mackerras19ccb762011-07-23 17:42:46 +10002824 /* Woken by external or decrementer interrupt */
2825 ld r1, HSTATE_HOST_R1(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002826
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002827#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2828 addi r3, r4, VCPU_TB_RMINTR
2829 bl kvmhv_accumulate_time
2830#endif
2831
Paul Mackerras93d17392016-06-22 15:52:55 +10002832#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002833/*
2834 * Branch around the call if both CPU_FTR_TM and
2835 * CPU_FTR_P9_TM_HV_ASSIST are off.
2836 */
Paul Mackerras93d17392016-06-22 15:52:55 +10002837BEGIN_FTR_SECTION
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11002838 b 91f
2839END_FTR_SECTION(CPU_FTR_TM | CPU_FTR_P9_TM_HV_ASSIST, 0)
Paul Mackerras67f8a8c2017-09-12 13:47:23 +10002840 /*
2841 * NOTE THAT THIS TRASHES ALL NON-VOLATILE REGISTERS INCLUDING CR
2842 */
Simon Guo6f597c62018-05-23 15:01:48 +08002843 mr r3, r4
2844 ld r4, VCPU_MSR(r3)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10002845 bl kvmppc_restore_tm_hv
Simon Guo6f597c62018-05-23 15:01:48 +08002846 ld r4, HSTATE_KVM_VCPU(r13)
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100284791:
Paul Mackerras93d17392016-06-22 15:52:55 +10002848#endif
2849
Paul Mackerras19ccb762011-07-23 17:42:46 +10002850 /* load up FP state */
2851 bl kvmppc_load_fp
2852
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002853 /* Restore guest decrementer */
2854 ld r3, VCPU_DEC_EXPIRES(r4)
2855 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10002856 ld r6, VCORE_TB_OFFSET_APPL(r5)
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002857 add r3, r3, r6 /* convert host TB to guest TB value */
2858 mftb r7
2859 subf r3, r7, r3
2860 mtspr SPRN_DEC, r3
2861
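	/*
	 * DEC restore above, in C terms (the mirror image of the save
	 * path; sketch only):
	 *
	 *	mtspr(SPRN_DEC, vcpu->arch.dec_expires
	 *			+ vc->tb_offset_applied - mftb());
	 */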
Paul Mackerras19ccb762011-07-23 17:42:46 +10002862 /* Load NV GPRS */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002863 ld r14, VCPU_GPR(R14)(r4)
2864 ld r15, VCPU_GPR(R15)(r4)
2865 ld r16, VCPU_GPR(R16)(r4)
2866 ld r17, VCPU_GPR(R17)(r4)
2867 ld r18, VCPU_GPR(R18)(r4)
2868 ld r19, VCPU_GPR(R19)(r4)
2869 ld r20, VCPU_GPR(R20)(r4)
2870 ld r21, VCPU_GPR(R21)(r4)
2871 ld r22, VCPU_GPR(R22)(r4)
2872 ld r23, VCPU_GPR(R23)(r4)
2873 ld r24, VCPU_GPR(R24)(r4)
2874 ld r25, VCPU_GPR(R25)(r4)
2875 ld r26, VCPU_GPR(R26)(r4)
2876 ld r27, VCPU_GPR(R27)(r4)
2877 ld r28, VCPU_GPR(R28)(r4)
2878 ld r29, VCPU_GPR(R29)(r4)
2879 ld r30, VCPU_GPR(R30)(r4)
2880 ld r31, VCPU_GPR(R31)(r4)
Suresh Warrier37f55d32016-08-19 15:35:46 +10002881
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002882 /* Check the wake reason in SRR1 to see why we got here */
2883 bl kvmppc_check_wake_reason
Paul Mackerras19ccb762011-07-23 17:42:46 +10002884
Suresh Warrier37f55d32016-08-19 15:35:46 +10002885 /*
2886 * Restore volatile registers since we could have called a
2887 * C routine in kvmppc_check_wake_reason
2888 * r4 = VCPU
2889 * r3 tells us whether we need to return to the host or not.
2890 * WARNING: r3 gets checked further down; do not modify it
2891 * until that check is done.
2892 */
2893 ld r4, HSTATE_KVM_VCPU(r13)
2894
Paul Mackerras19ccb762011-07-23 17:42:46 +10002895 /* clear our bit in vcore->napping_threads */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100289634: ld r5,HSTATE_KVM_VCORE(r13)
2897 lbz r7,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002898 li r0,1
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002899 sld r0,r0,r7
Paul Mackerras19ccb762011-07-23 17:42:46 +10002900 addi r6,r5,VCORE_NAPPING_THREADS
290132: lwarx r7,0,r6
2902 andc r7,r7,r0
2903 stwcx. r7,0,r6
2904 bne 32b
2905 li r0,0
2906 stb r0,HSTATE_NAPPING(r13)
2907
Suresh Warrier37f55d32016-08-19 15:35:46 +10002908 /* See if the wake reason saved in r3 means we need to exit */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002909 stw r12, VCPU_TRAP(r4)
Paul Mackerras4619ac82013-04-17 20:31:41 +00002910 mr r9, r4
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002911 cmpdi r3, 0
2912 bgt guest_exit_cont
Paul Mackerras4619ac82013-04-17 20:31:41 +00002913
Paul Mackerras19ccb762011-07-23 17:42:46 +10002914 /* see if any other thread is already exiting */
2915 lwz r0,VCORE_ENTRY_EXIT(r5)
2916 cmpwi r0,0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002917 bge guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002918
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002919 b kvmppc_cede_reentry /* if not go back to guest */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002920
2921 /* cede when already previously prodded case */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002922kvm_cede_prodded:
2923 li r0,0
Paul Mackerras19ccb762011-07-23 17:42:46 +10002924 stb r0,VCPU_PRODDED(r3)
2925 sync /* order testing prodded vs. clearing ceded */
2926 stb r0,VCPU_CEDED(r3)
2927 li r3,H_SUCCESS
2928 blr
2929
2930 /* we've ceded but we want to give control to the host */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002931kvm_cede_exit:
Paul Mackerras6af27c82015-03-28 14:21:10 +11002932 ld r9, HSTATE_KVM_VCPU(r13)
Benjamin Herrenschmidt9b9b13a2018-01-12 13:37:16 +11002933#ifdef CONFIG_KVM_XICS
Paul Mackerras577a5112019-08-13 20:03:49 +10002934 /* are we using XIVE with single escalation? */
Benjamin Herrenschmidt9b9b13a2018-01-12 13:37:16 +11002935 ld r10, VCPU_XIVE_ESC_VADDR(r9)
2936 cmpdi r10, 0
2937 beq 3f
Paul Mackerras577a5112019-08-13 20:03:49 +10002938 li r6, XIVE_ESB_SET_PQ_00
2939 /*
2940 * If we still have a pending escalation, abort the cede,
2941 * and we must set PQ to 10 rather than 00 so that we don't
2942 * potentially end up with two entries for the escalation
2943 * interrupt in the XIVE interrupt queue. In that case
2944 * we also don't want to set xive_esc_on to 1 here in
2945 * case we race with xive_esc_irq().
2946 */
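	/*
	 * The decision above, as a C sketch (hypothetical esb_load()
	 * helper; a load from the ESB page at the given offset updates
	 * PQ as a side effect):
	 *
	 *	if (vcpu->arch.xive_esc_on) {
	 *		vcpu->arch.ceded = 0;		// abort the cede
	 *		esb_load(esc + XIVE_ESB_SET_PQ_10);
	 *	} else {
	 *		vcpu->arch.xive_esc_on = 1;
	 *		esb_load(esc + XIVE_ESB_SET_PQ_00);
	 *	}
	 */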
2947 lbz r5, VCPU_XIVE_ESC_ON(r9)
2948 cmpwi r5, 0
2949 beq 4f
2950 li r0, 0
2951 stb r0, VCPU_CEDED(r9)
2952 li r6, XIVE_ESB_SET_PQ_10
2953 b 5f
29544: li r0, 1
2955 stb r0, VCPU_XIVE_ESC_ON(r9)
2956 /* make sure store to xive_esc_on is seen before xive_esc_irq runs */
2957 sync
29585: /* Enable XIVE escalation */
2959 mfmsr r0
2960 andi. r0, r0, MSR_DR /* in real mode? */
2961 beq 1f
2962 ldx r0, r10, r6
Benjamin Herrenschmidt9b9b13a2018-01-12 13:37:16 +11002963 b 2f
29641: ld r10, VCPU_XIVE_ESC_RADDR(r9)
Paul Mackerras577a5112019-08-13 20:03:49 +10002965 ldcix r0, r10, r6
Benjamin Herrenschmidt9b9b13a2018-01-12 13:37:16 +110029662: sync
Benjamin Herrenschmidt9b9b13a2018-01-12 13:37:16 +11002967#endif /* CONFIG_KVM_XICS */
29683: b guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002969
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002970 /* Try to handle a machine check in real mode */
2971machine_check_realmode:
2972 mr r3, r9 /* get vcpu pointer */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002973 bl kvmppc_realmode_machine_check
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002974 nop
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002975 ld r9, HSTATE_KVM_VCPU(r13)
2976 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302977 /*
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302978 * For a guest that is FWNMI capable, deliver all MCE errors
2979 * (handled or unhandled) by exiting the guest with the KVM_EXIT_NMI
2980 * exit reason. This approach injects machine check errors into the
2981 * guest address space with additional information in the form of an
2982 * RTAS event, enabling the guest kernel to handle such errors
2983 * suitably.
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302984 *
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302985 * For a guest that is not FWNMI capable (e.g. old QEMU), fall back
2986 * to the old behaviour for backward compatibility:
2987 * deliver unhandled or fatal (e.g. UE) MCE errors to the guest
2988 * through a machine check interrupt (set HSRR0 to 0x200).
2989 * For handled (non-fatal) errors, just go back to guest execution
2990 * with the current HSRR0.
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +05302991 * If we receive a machine check with MSR[RI] = 0, deliver it to the
2992 * guest as a machine check, causing the guest to crash.
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302993 */
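	/*
	 * Delivery decision below, as a C sketch (hypothetical helpers;
	 * handled is the return value of kvmppc_realmode_machine_check):
	 *
	 *	if (guest_msr & MSR_HV)
	 *		return exit_to_host();
	 *	if (kvm->arch.fwnmi_enabled)
	 *		return exit_to_host();		// KVM_EXIT_NMI
	 *	if (!(guest_msr & MSR_RI) || !handled)
	 *		inject_machine_check(vcpu);	// vector 0x200
	 *	// else resume the guest at the current HSRR0
	 */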
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302994 ld r11, VCPU_MSR(r9)
Paul Mackerras1c9e3d52015-11-12 16:43:48 +11002995 rldicl. r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
2996 bne mc_cont /* if so, exit to host */
Aravinda Prasade20bbd32017-05-11 16:33:37 +05302997 /* Check if guest is capable of handling NMI exit */
2998 ld r10, VCPU_KVM(r9)
2999 lbz r10, KVM_FWNMI(r10)
3000 cmpdi r10, 1 /* FWNMI capable? */
3001 beq mc_cont /* if so, exit with KVM_EXIT_NMI. */
3002
3003 /* if not, fall through for backward compatibility. */
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +05303004 andi. r10, r11, MSR_RI /* check for unrecoverable exception */
3005 beq 1f /* Deliver a machine check to guest */
3006 ld r10, VCPU_PC(r9)
3007 cmpdi r3, 0 /* Did we handle MCE ? */
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05303008 bne 2f /* Continue guest execution. */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00003009 /* If not, deliver a machine check. SRR0/1 are already set */
Mahesh Salgaonkar966d7132015-03-23 22:24:45 +053030101: li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
Michael Neulinge4e38122014-03-25 10:47:02 +11003011 bl kvmppc_msr_interrupt
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +053030122: b fast_interrupt_c_return
Paul Mackerrasb4072df2012-11-23 22:37:50 +00003013
Paul Mackerrasde56a942011-06-29 00:21:34 +00003014/*
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11003015 * Check the reason we woke from nap, and take appropriate action.
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11003016 * Returns (in r3):
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11003017 * 0 if nothing needs to be done
3018 * 1 if something happened that needs to be handled by the host
Paul Mackerras66feed62015-03-28 14:21:12 +11003019 * -1 if there was a guest wakeup (IPI or msgsnd)
Suresh Warriere3c13e52016-08-19 15:35:51 +10003020 * -2 if we handled a PCI passthrough interrupt (returned by
3021 * kvmppc_read_intr only)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11003022 *
3023 * Also sets r12 to the interrupt vector for any interrupt that needs
3024 * to be handled now by the host (0x500 for external interrupt), or zero.
Suresh Warrier37f55d32016-08-19 15:35:46 +10003025 * Modifies all volatile registers (since it may call a C function).
3026 * This routine calls kvmppc_read_intr, a C function, if an external
3027 * interrupt is pending.
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11003028 */
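/*
 * The SRR1 decode implemented below, as a sketch (shift and mask per
 * the rlwinm; hypothetical helper names):
 *
 *	switch ((srr1 >> 18) & 0xf) {	// 3-bit field before P8
 *	case 0x8: return read_external_interrupt();	// may be host IPI
 *	case 0x6: return 0;		// decrementer
 *	case 0x5: return 0;		// privileged doorbell (P8)
 *	case 0x3: return check_hv_doorbell();	// hypervisor doorbell (P8)
 *	case 0xa: return 1;		// hypervisor maintenance
 *	default:  return 1;		// let the host handle it
 *	}
 */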
3029kvmppc_check_wake_reason:
3030 mfspr r6, SPRN_SRR1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11003031BEGIN_FTR_SECTION
3032 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
3033FTR_SECTION_ELSE
3034 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
3035ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
3036 cmpwi r6, 8 /* was it an external interrupt? */
Suresh Warrier37f55d32016-08-19 15:35:46 +10003037 beq 7f /* if so, see what it was */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11003038 li r3, 0
3039 li r12, 0
3040 cmpwi r6, 6 /* was it the decrementer? */
3041 beq 0f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11003042BEGIN_FTR_SECTION
3043 cmpwi r6, 5 /* privileged doorbell? */
3044 beq 0f
Paul Mackerras5d00f662014-01-08 21:25:28 +11003045 cmpwi r6, 3 /* hypervisor doorbell? */
3046 beq 3f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11003047END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05303048 cmpwi r6, 0xa /* Hypervisor maintenance? */
3049 beq 4f
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11003050 li r3, 1 /* anything else, return 1 */
30510: blr
3052
Paul Mackerras5d00f662014-01-08 21:25:28 +11003053 /* hypervisor doorbell */
30543: li r12, BOOK3S_INTERRUPT_H_DOORBELL
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05303055
3056 /*
3057 * Clear the doorbell as we will invoke the handler
3058 * explicitly in the guest exit path.
3059 */
3060 lis r6, (PPC_DBELL_SERVER << (63-36))@h
3061 PPC_MSGCLR(6)
Paul Mackerras66feed62015-03-28 14:21:12 +11003062 /* see if it's a host IPI */
Paul Mackerras5d00f662014-01-08 21:25:28 +11003063 li r3, 1
Nicholas Piggin2cde3712017-10-10 20:18:28 +10003064BEGIN_FTR_SECTION
3065 PPC_MSGSYNC
3066 lwsync
3067END_FTR_SECTION_IFSET(CPU_FTR_ARCH_300)
Paul Mackerras66feed62015-03-28 14:21:12 +11003068 lbz r0, HSTATE_HOST_IPI(r13)
3069 cmpwi r0, 0
3070 bnelr
Gautham R. Shenoy70aa3962015-10-15 11:29:58 +05303071 /* if not, return -1 */
Paul Mackerras66feed62015-03-28 14:21:12 +11003072 li r3, -1
Paul Mackerras5d00f662014-01-08 21:25:28 +11003073 blr
3074
Mahesh Salgaonkarfd7bacb2016-05-15 09:44:26 +05303075 /* Woken up due to Hypervisor maintenance interrupt */
30764: li r12, BOOK3S_INTERRUPT_HMI
3077 li r3, 1
3078 blr
3079
Suresh Warrier37f55d32016-08-19 15:35:46 +10003080 /* external interrupt - create a stack frame so we can call C */
30817: mflr r0
3082 std r0, PPC_LR_STKOFF(r1)
3083 stdu r1, -PPC_MIN_STKFRM(r1)
3084 bl kvmppc_read_intr
3085 nop
3086 li r12, BOOK3S_INTERRUPT_EXTERNAL
Suresh Warrierf7af5202016-08-19 15:35:52 +10003087 cmpdi r3, 1
3088 ble 1f
3089
3090 /*
3091 * A return code of 2 means a PCI passthrough interrupt, but
3092 * we need to return to the host to complete handling the
3093 * interrupt. The guest exit code expects the trap reason
3094 * in r12.
3095 */
3096 li r12, BOOK3S_INTERRUPT_HV_RM_HARD
30971:
Suresh Warrier37f55d32016-08-19 15:35:46 +10003098 ld r0, PPC_MIN_STKFRM+PPC_LR_STKOFF(r1)
3099 addi r1, r1, PPC_MIN_STKFRM
3100 mtlr r0
3101 blr
Paul Mackerrasde56a942011-06-29 00:21:34 +00003102
3103/*
3104 * Save away FP, VMX and VSX registers.
3105 * r3 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11003106 * N.B. r30 and r31 are volatile across this function,
3107 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00003108 */
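/*
 * The MSR setup done in kvmppc_save_fp/kvmppc_load_fp below, in C
 * terms (the feature sections are patched at boot; sketch only):
 *
 *	msr = mfmsr() | MSR_FP;
 *	if (cpu_has_feature(CPU_FTR_ALTIVEC))
 *		msr |= MSR_VEC;
 *	if (cpu_has_feature(CPU_FTR_VSX))
 *		msr |= MSR_VSX;
 *	mtmsrd(msr);
 */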
Paul Mackerras595e4f72013-10-15 20:43:04 +11003109kvmppc_save_fp:
3110 mflr r30
3111 mr r31,r3
Paul Mackerras89436332012-03-02 01:38:23 +00003112 mfmsr r5
3113 ori r8,r5,MSR_FP
Paul Mackerrasde56a942011-06-29 00:21:34 +00003114#ifdef CONFIG_ALTIVEC
3115BEGIN_FTR_SECTION
3116 oris r8,r8,MSR_VEC@h
3117END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3118#endif
3119#ifdef CONFIG_VSX
3120BEGIN_FTR_SECTION
3121 oris r8,r8,MSR_VSX@h
3122END_FTR_SECTION_IFSET(CPU_FTR_VSX)
3123#endif
3124 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11003125 addi r3,r3,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003126 bl store_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003127#ifdef CONFIG_ALTIVEC
3128BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11003129 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003130 bl store_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003131END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3132#endif
3133 mfspr r6,SPRN_VRSAVE
Paul Mackerrase724f082014-03-13 20:02:48 +11003134 stw r6,VCPU_VRSAVE(r31)
Paul Mackerras595e4f72013-10-15 20:43:04 +11003135 mtlr r30
Paul Mackerrasde56a942011-06-29 00:21:34 +00003136 blr
3137
3138/*
3139 * Load up FP, VMX and VSX registers
3140 * r4 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11003141 * N.B. r30 and r31 are volatile across this function,
3142 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00003143 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00003144kvmppc_load_fp:
Paul Mackerras595e4f72013-10-15 20:43:04 +11003145 mflr r30
3146 mr r31,r4
Paul Mackerrasde56a942011-06-29 00:21:34 +00003147 mfmsr r9
3148 ori r8,r9,MSR_FP
3149#ifdef CONFIG_ALTIVEC
3150BEGIN_FTR_SECTION
3151 oris r8,r8,MSR_VEC@h
3152END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3153#endif
3154#ifdef CONFIG_VSX
3155BEGIN_FTR_SECTION
3156 oris r8,r8,MSR_VSX@h
3157END_FTR_SECTION_IFSET(CPU_FTR_VSX)
3158#endif
3159 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11003160 addi r3,r4,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003161 bl load_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003162#ifdef CONFIG_ALTIVEC
3163BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11003164 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02003165 bl load_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00003166END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
3167#endif
Paul Mackerrase724f082014-03-13 20:02:48 +11003168 lwz r7,VCPU_VRSAVE(r31)
Paul Mackerrasde56a942011-06-29 00:21:34 +00003169 mtspr SPRN_VRSAVE,r7
Paul Mackerras595e4f72013-10-15 20:43:04 +11003170 mtlr r30
3171 mr r4,r31
Paul Mackerrasde56a942011-06-29 00:21:34 +00003172 blr
Paul Mackerras44a3add2013-10-04 21:45:04 +10003173
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003174#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
3175/*
3176 * Save transactional state and TM-related registers.
Simon Guo6f597c62018-05-23 15:01:48 +08003177 * Called with r3 pointing to the vcpu struct and r4 containing
3178 * the guest MSR value.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003179 * This can modify all checkpointed registers, but
Simon Guo6f597c62018-05-23 15:01:48 +08003180 * restores r1 and r2 before exit.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003181 */
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003182kvmppc_save_tm_hv:
3183 /* See if we need to handle fake suspend mode */
3184BEGIN_FTR_SECTION
Simon Guocaa3be92018-05-23 15:01:50 +08003185 b __kvmppc_save_tm
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003186END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
3187
3188 lbz r0, HSTATE_FAKE_SUSPEND(r13) /* Were we fake suspended? */
3189 cmpwi r0, 0
Simon Guocaa3be92018-05-23 15:01:50 +08003190 beq __kvmppc_save_tm
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003191
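	/*
	 * The dispatch above, in C terms (sketch; field name follows the
	 * HSTATE_FAKE_SUSPEND asm offset):
	 *
	 *	if (!cpu_has_feature(CPU_FTR_P9_TM_HV_ASSIST) ||
	 *	    !local_paca->hstate.fake_suspend)
	 *		return __kvmppc_save_tm(vcpu, guest_msr);
	 *	// otherwise fall through to the fake-suspend handling below
	 */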
3192 /* The following code handles the fake_suspend = 1 case */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003193 mflr r0
3194 std r0, PPC_LR_STKOFF(r1)
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003195 stdu r1, -PPC_MIN_STKFRM(r1)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003196
3197 /* Turn on TM. */
3198 mfmsr r8
3199 li r0, 1
3200 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
3201 mtmsrd r8
3202
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003203 rldicl. r8, r8, 64 - MSR_TS_S_LG, 62 /* Did we actually hrfid? */
3204 beq 4f
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003205BEGIN_FTR_SECTION
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003206 bl pnv_power9_force_smt4_catch
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003207END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_XER_SO_BUG)
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003208 nop
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003209
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003210 std r1, HSTATE_HOST_R1(r13)
3211
3212 /* Clear MSR[RI] since r1 and r13 may not be valid here. */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003213 li r5, 0
3214 mtmsrd r5, 1
3215
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003216 /* We have to treclaim here because that's the only way to do S->N */
3217 li r3, TM_CAUSE_KVM_RESCHED
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003218 TRECLAIM(R3)
3219
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003220 /*
3221 * We were in fake suspend, so we are not going to save the
3222 * register state as the guest checkpointed state (since
3223 * we already have it), therefore we can now use any volatile GPR.
3224 */
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003225 /* Reload PACA pointer, stack pointer and TOC. */
3226 GET_PACA(r13)
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003227 ld r1, HSTATE_HOST_R1(r13)
3228 ld r2, PACATOC(r13)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003229
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003230 /* Set MSR RI now we have r1 and r13 back. */
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003231 li r5, MSR_RI
3232 mtmsrd r5, 1
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003233
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003234 HMT_MEDIUM
3235 ld r6, HSTATE_DSCR(r13)
3236 mtspr SPRN_DSCR, r6
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003237BEGIN_FTR_SECTION_NESTED(96)
3238 bl pnv_power9_force_smt4_release
3239END_FTR_SECTION_NESTED(CPU_FTR_P9_TM_XER_SO_BUG, CPU_FTR_P9_TM_XER_SO_BUG, 96)
3240 nop
3241
32424:
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003243 mfspr r3, SPRN_PSSCR
3244 /* PSSCR_FAKE_SUSPEND is a write-only bit, but clear it anyway */
3245 li r0, PSSCR_FAKE_SUSPEND
3246 andc r3, r3, r0
3247 mtspr SPRN_PSSCR, r3
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003248
Paul Mackerras681c6172018-03-21 21:32:03 +11003249 /* Don't save TEXASR, use value from last exit in real suspend state */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003250 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003251 mfspr r5, SPRN_TFHAR
3252 mfspr r6, SPRN_TFIAR
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003253 std r5, VCPU_TFHAR(r9)
3254 std r6, VCPU_TFIAR(r9)
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003255
Suraj Jitindar Singh87a11bb2018-03-21 21:32:02 +11003256 addi r1, r1, PPC_MIN_STKFRM
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003257 ld r0, PPC_LR_STKOFF(r1)
3258 mtlr r0
3259 blr
3260
3261/*
3262 * Restore transactional state and TM-related registers.
Simon Guo6f597c62018-05-23 15:01:48 +08003263 * Called with r3 pointing to the vcpu struct
3264 * and r4 containing the guest MSR value.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003265 * This potentially modifies all checkpointed registers.
Simon Guo6f597c62018-05-23 15:01:48 +08003266 * It restores r1 and r2 from the PACA.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003267 */
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003268kvmppc_restore_tm_hv:
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003269 /*
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003270 * If we are doing TM emulation for the guest on a POWER9 DD2,
3271 * then we don't actually do a trechkpt -- we either set up
3272 * fake-suspend mode, or emulate a TM rollback.
3273 */
3274BEGIN_FTR_SECTION
Simon Guocaa3be92018-05-23 15:01:50 +08003275 b __kvmppc_restore_tm
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003276END_FTR_SECTION_IFCLR(CPU_FTR_P9_TM_HV_ASSIST)
3277 mflr r0
3278 std r0, PPC_LR_STKOFF(r1)
3279
3280 li r0, 0
3281 stb r0, HSTATE_FAKE_SUSPEND(r13)
3282
3283 /* Turn on TM so we can restore TM SPRs */
3284 mfmsr r5
3285 li r0, 1
3286 rldimi r5, r0, MSR_TM_LG, 63-MSR_TM_LG
3287 mtmsrd r5
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003288
3289 /*
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003290 * The user may change these outside of a transaction, so they must
3291 * always be context switched.
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003292 */
Simon Guo6f597c62018-05-23 15:01:48 +08003293 ld r5, VCPU_TFHAR(r3)
3294 ld r6, VCPU_TFIAR(r3)
3295 ld r7, VCPU_TEXASR(r3)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003296 mtspr SPRN_TFHAR, r5
3297 mtspr SPRN_TFIAR, r6
3298 mtspr SPRN_TEXASR, r7
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003299
Simon Guo6f597c62018-05-23 15:01:48 +08003300 rldicl. r5, r4, 64 - MSR_TS_S_LG, 62
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003301 beqlr /* TM not active in guest */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003302
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003303 /* Make sure the failure summary is set */
3304 oris r7, r7, (TEXASR_FS)@h
3305 mtspr SPRN_TEXASR, r7
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003306
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003307 cmpwi r5, 1 /* check for suspended state */
3308 bgt 10f
3309 stb r5, HSTATE_FAKE_SUSPEND(r13)
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003310 b 9f /* and return */
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +1100331110: stdu r1, -PPC_MIN_STKFRM(r1)
3312 /* guest is in transactional state, so simulate rollback */
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003313 bl kvmhv_emulate_tm_rollback
3314 nop
Paul Mackerras4bb3c7a2018-03-21 21:32:01 +11003315 addi r1, r1, PPC_MIN_STKFRM
Paul Mackerras7b0e8272018-05-30 20:07:52 +100033169: ld r0, PPC_LR_STKOFF(r1)
3317 mtlr r0
3318 blr
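/*
 * The fake-suspend decision in kvmppc_restore_tm_hv above, in C terms
 * (MSR[TS]: 0 = inactive, 1 = suspended, 2 = transactional; sketch):
 *
 *	ts = (guest_msr >> MSR_TS_S_LG) & 3;
 *	if (ts == 0)
 *		return;				// TM not active in guest
 *	texasr |= TEXASR_FS;			// failure summary must be set
 *	if (ts == 1)
 *		local_paca->hstate.fake_suspend = 1;
 *	else
 *		kvmhv_emulate_tm_rollback(vcpu);	// transactional: roll back
 */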
Paul Mackerras7b0e8272018-05-30 20:07:52 +10003319#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */
Paul Mackerrasf024ee02016-06-22 14:21:59 +10003320
Paul Mackerras44a3add2013-10-04 21:45:04 +10003321/*
3322 * We come here if we get any exception or interrupt while we are
3323 * executing host real mode code while in guest MMU context.
Paul Mackerras857b99e2017-09-01 16:17:27 +10003324 * r12 is (CR << 32) | vector
3325 * r13 points to our PACA
3326 * r12 is saved in HSTATE_SCRATCH0(r13)
3327 * ctr is saved in HSTATE_SCRATCH1(r13) if RELOCATABLE
3328 * r9 is saved in HSTATE_SCRATCH2(r13)
3329 * r13 is saved in HSPRG1
3330 * cfar is saved in HSTATE_CFAR(r13)
3331 * ppr is saved in HSTATE_PPR(r13)
Paul Mackerras44a3add2013-10-04 21:45:04 +10003332 */
3333kvmppc_bad_host_intr:
Paul Mackerras857b99e2017-09-01 16:17:27 +10003334 /*
3335 * Switch to the emergency stack, but start half-way down in
3336 * case we were already on it.
3337 */
3338 mr r9, r1
3339 std r1, PACAR1(r13)
3340 ld r1, PACAEMERGSP(r13)
3341 subi r1, r1, THREAD_SIZE/2 + INT_FRAME_SIZE
3342 std r9, 0(r1)
3343 std r0, GPR0(r1)
3344 std r9, GPR1(r1)
3345 std r2, GPR2(r1)
3346 SAVE_4GPRS(3, r1)
3347 SAVE_2GPRS(7, r1)
3348 srdi r0, r12, 32
3349 clrldi r12, r12, 32
3350 std r0, _CCR(r1)
3351 std r12, _TRAP(r1)
3352 andi. r0, r12, 2
3353 beq 1f
3354 mfspr r3, SPRN_HSRR0
3355 mfspr r4, SPRN_HSRR1
3356 mfspr r5, SPRN_HDAR
3357 mfspr r6, SPRN_HDSISR
3358 b 2f
33591: mfspr r3, SPRN_SRR0
3360 mfspr r4, SPRN_SRR1
3361 mfspr r5, SPRN_DAR
3362 mfspr r6, SPRN_DSISR
33632: std r3, _NIP(r1)
3364 std r4, _MSR(r1)
3365 std r5, _DAR(r1)
3366 std r6, _DSISR(r1)
3367 ld r9, HSTATE_SCRATCH2(r13)
3368 ld r12, HSTATE_SCRATCH0(r13)
3369 GET_SCRATCH0(r0)
3370 SAVE_4GPRS(9, r1)
3371 std r0, GPR13(r1)
3372 SAVE_NVGPRS(r1)
3373 ld r5, HSTATE_CFAR(r13)
3374 std r5, ORIG_GPR3(r1)
3375 mflr r3
3376#ifdef CONFIG_RELOCATABLE
3377 ld r4, HSTATE_SCRATCH1(r13)
3378#else
3379 mfctr r4
3380#endif
3381 mfxer r5
Madhavan Srinivasan4e26bc42017-12-20 09:25:50 +05303382 lbz r6, PACAIRQSOFTMASK(r13)
Paul Mackerras857b99e2017-09-01 16:17:27 +10003383 std r3, _LINK(r1)
3384 std r4, _CTR(r1)
3385 std r5, _XER(r1)
3386 std r6, SOFTE(r1)
3387 ld r2, PACATOC(r13)
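	/* 0x7265677368657265 is ASCII "regshere", a stack frame marker */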
3388 LOAD_REG_IMMEDIATE(3, 0x7265677368657265)
3389 std r3, STACK_FRAME_OVERHEAD-16(r1)
3390
3391 /*
3392 * On POWER9 do a minimal restore of the MMU and call C code,
3393 * which will print a message and panic.
3394 * XXX On POWER7 and POWER8, we just spin here since we don't
3395 * know what the other threads are doing (and we don't want to
3396 * coordinate with them) - but at least we now have register state
3397 * in memory that we might be able to look at from another CPU.
3398 */
3399BEGIN_FTR_SECTION
Paul Mackerras44a3add2013-10-04 21:45:04 +10003400 b .
Paul Mackerras857b99e2017-09-01 16:17:27 +10003401END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_300)
3402 ld r9, HSTATE_KVM_VCPU(r13)
3403 ld r10, VCPU_KVM(r9)
3404
3405 li r0, 0
3406 mtspr SPRN_AMR, r0
3407 mtspr SPRN_IAMR, r0
3408 mtspr SPRN_CIABR, r0
3409 mtspr SPRN_DAWRX, r0
3410
Paul Mackerras857b99e2017-09-01 16:17:27 +10003411BEGIN_MMU_FTR_SECTION
3412 b 4f
3413END_MMU_FTR_SECTION_IFSET(MMU_FTR_TYPE_RADIX)
3414
3415 slbmte r0, r0
3416 slbia
3417 ptesync
3418 ld r8, PACA_SLBSHADOWPTR(r13)
3419 .rept SLB_NUM_BOLTED
3420 li r3, SLBSHADOW_SAVEAREA
3421 LDX_BE r5, r8, r3
3422 addi r3, r3, 8
3423 LDX_BE r6, r8, r3
3424 andis. r7, r5, SLB_ESID_V@h
3425 beq 3f
3426 slbmte r6, r5
34273: addi r8, r8, 16
3428 .endr
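	/*
	 * The bolted-SLB reload above, in C terms (sketch; entries are
	 * big-endian in the shadow buffer, field names assumed):
	 *
	 *	for (i = 0; i < SLB_NUM_BOLTED; i++) {
	 *		e = &paca->slb_shadow_ptr->save_area[i];
	 *		if (be64_to_cpu(e->esid) & SLB_ESID_V)
	 *			slbmte(be64_to_cpu(e->vsid),
	 *			       be64_to_cpu(e->esid));
	 *	}
	 */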
3429
34304: lwz r7, KVM_HOST_LPID(r10)
3431 mtspr SPRN_LPID, r7
3432 mtspr SPRN_PID, r0
3433 ld r8, KVM_HOST_LPCR(r10)
3434 mtspr SPRN_LPCR, r8
3435 isync
3436 li r0, KVM_GUEST_MODE_NONE
3437 stb r0, HSTATE_IN_GUEST(r13)
3438
3439 /*
3440 * Turn on the MMU and jump to C code
3441 */
3442 bcl 20, 31, .+4
34435: mflr r3
3444 addi r3, r3, 9f - 5b
Nicholas Piggineadce3b2018-05-18 03:49:43 +10003445 li r4, -1
3446 rldimi r3, r4, 62, 0 /* ensure 0xc000000000000000 bits are set */
Paul Mackerras857b99e2017-09-01 16:17:27 +10003447 ld r4, PACAKMSR(r13)
3448 mtspr SPRN_SRR0, r3
3449 mtspr SPRN_SRR1, r4
Nicholas Piggin222f20f2018-01-10 03:07:15 +11003450 RFI_TO_KERNEL
Paul Mackerras857b99e2017-09-01 16:17:27 +100034519: addi r3, r1, STACK_FRAME_OVERHEAD
3452 bl kvmppc_bad_interrupt
3453 b 9b
Michael Neulinge4e38122014-03-25 10:47:02 +11003454
3455/*
3456 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
3457 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
3458 * r11 has the guest MSR value (in/out)
3459 * r9 has a vcpu pointer (in)
3460 * r0 is used as a scratch register
3461 */
3462kvmppc_msr_interrupt:
3463 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
3464 cmpwi r0, 2 /* Check if we are in transactional state.. */
3465 ld r11, VCPU_INTR_MSR(r9)
3466 bne 1f
3467 /* ... if transactional, change to suspended */
3468 li r0, 1
34691: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
3470 blr
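/*
 * Equivalent C sketch (a transactional MSR[TS] of 2 becomes 1, i.e.
 * suspended, on delivery; a suspended state is preserved):
 *
 *	ts = (guest_msr >> MSR_TS_S_LG) & 3;
 *	if (ts == 2)
 *		ts = 1;
 *	new_msr = vcpu->arch.intr_msr;
 *	new_msr = (new_msr & ~(3ul << MSR_TS_S_LG)) | (ts << MSR_TS_S_LG);
 */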
Paul Mackerras9bc01a92014-05-26 19:48:40 +10003471
3472/*
3473 * This works around a hardware bug on POWER8E processors, where
3474 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
3475 * performance monitor interrupt. Instead, when we need to have
3476 * an interrupt pending, we have to arrange for a counter to overflow.
3477 */
3478kvmppc_fix_pmao:
3479 li r3, 0
3480 mtspr SPRN_MMCR2, r3
3481 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
3482 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
3483 mtspr SPRN_MMCR0, r3
3484 lis r3, 0x7fff
3485 ori r3, r3, 0xffff
3486 mtspr SPRN_PMC6, r3
3487 isync
3488 blr
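/*
 * The counter setup above, in C terms (PMC6 is loaded one count below
 * overflow so the next event latches PMAO; sketch only):
 *
 *	mtspr(SPRN_MMCR2, 0);
 *	mtspr(SPRN_MMCR0, MMCR0_PMXE | MMCR0_FCECE |
 *			  MMCR0_PMCjCE | MMCR0_C56RUN);
 *	mtspr(SPRN_PMC6, 0x7fffffff);
 */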
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003489
3490#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
3491/*
3492 * Start timing an activity
3493 * r3 = pointer to time accumulation struct, r4 = vcpu
3494 */
3495kvmhv_start_timing:
3496 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003497 ld r6, VCORE_TB_OFFSET_APPL(r5)
3498 mftb r5
3499 subf r5, r6, r5 /* subtract current timebase offset */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003500 std r3, VCPU_CUR_ACTIVITY(r4)
3501 std r5, VCPU_ACTIVITY_START(r4)
3502 blr
3503
3504/*
3505 * Accumulate time to one activity and start another.
3506 * r3 = pointer to new time accumulation struct, r4 = vcpu
3507 */
3508kvmhv_accumulate_time:
3509 ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003510 ld r8, VCORE_TB_OFFSET_APPL(r5)
3511 ld r5, VCPU_CUR_ACTIVITY(r4)
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003512 ld r6, VCPU_ACTIVITY_START(r4)
3513 std r3, VCPU_CUR_ACTIVITY(r4)
3514 mftb r7
Paul Mackerras57b8daa2018-04-20 22:51:11 +10003515 subf r7, r8, r7 /* subtract current timebase offset */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11003516 std r7, VCPU_ACTIVITY_START(r4)
3517 cmpdi r5, 0
3518 beqlr
3519 subf r3, r6, r7
3520 ld r8, TAS_SEQCOUNT(r5)
3521 cmpdi r8, 0
3522 addi r8, r8, 1
3523 std r8, TAS_SEQCOUNT(r5)
3524 lwsync
3525 ld r7, TAS_TOTAL(r5)
3526 add r7, r7, r3
3527 std r7, TAS_TOTAL(r5)
3528 ld r6, TAS_MIN(r5)
3529 ld r7, TAS_MAX(r5)
3530 beq 3f
3531 cmpd r3, r6
3532 bge 1f
35333: std r3, TAS_MIN(r5)
35341: cmpd r3, r7
3535 ble 2f
3536 std r3, TAS_MAX(r5)
35372: lwsync
3538 addi r8, r8, 1
3539 std r8, TAS_SEQCOUNT(r5)
3540 blr
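/*
 * The accumulation above follows a seqcount-style writer protocol,
 * roughly (field names per the TAS_* asm offsets; sketch only):
 *
 *	delta = now - vcpu->arch.activity_start;
 *	seq = tas->seqcount;
 *	tas->seqcount = seq + 1;	// odd: update in progress
 *	lwsync();
 *	tas->total += delta;
 *	if (seq == 0 || delta < tas->min)
 *		tas->min = delta;
 *	if (delta > tas->max)
 *		tas->max = delta;
 *	lwsync();
 *	tas->seqcount = seq + 2;	// even again: readers may retry
 */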
3541#endif