/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/book3s/64/mmu-hash.h>
#include <asm/tm.h>
#include <asm/opal.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI
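	/*
	 * SRR1 above holds the current MSR with IR and DR cleared, so
	 * this RFI lands at kvmppc_call_hv_entry with instruction and
	 * data relocation off, i.e. in real mode.
	 */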

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* hwthread_req may have got set by cede or no vcpu, so clear it */
	li	r0, 0
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	beq	15f	/* Invoke the H_DOORBELL handler */
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

15:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0xe80

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/*
	 * Make sure the primary has finished the MMU switch.
	 * We should never get here on a secondary thread, but
	 * check it for robustness' sake.
	 */
	ld	r5, HSTATE_KVM_VCORE(r13)
65:	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	65b
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
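	/*
	 * The lwarx/stwcx. loop above is the usual powerpc atomic
	 * read-modify-write: in effect an atomic fetch-or of our
	 * thread bit (1 << ptid) into vcore->napping_threads.  The
	 * stwcx. fails and the loop retries if another thread wrote
	 * the word after our lwarx established its reservation.
	 */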
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap

kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpwi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	13f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
13:	mr	r3, r12
	stw	r12, 112-4(r1)
	bl	kvmhv_commence_exit
	nop
	lwz	r12, 112-4(r1)
	b	kvmhv_switch_to_host

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcore pointer, NULL if we have nothing to run */
	ld	r5,HSTATE_KVM_VCORE(r13)
	cmpdi	r5,0
	/* if we have no vcore to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR_DEFAULT(r13)
	std	r6, HSTATE_DSCR(r13)

	/* On thread 0 of a subcore, set HDEC to max */
	lbz	r4, HSTATE_PTID(r13)
	cmpwi	r4, 0
	bne	63f
	lis	r6, 0x7fff
	ori	r6, r6, 0xffff
	mtspr	SPRN_HDEC, r6
	/* and set per-LPAR registers, if doing dynamic micro-threading */
	ld	r6, HSTATE_SPLIT_MODE(r13)
	cmpdi	r6, 0
	beq	63f
	ld	r0, KVM_SPLIT_RPR(r6)
	mtspr	SPRN_RPR, r0
	ld	r0, KVM_SPLIT_PMMAR(r6)
	mtspr	SPRN_PMMAR, r0
	ld	r0, KVM_SPLIT_LDBAR(r6)
	mtspr	SPRN_LDBAR, r0
	isync
63:
	/* Order load of vcpu after load of vcore */
	lwsync
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu and vcore pointers so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	/*
	 * Once we clear HSTATE_KVM_VCORE(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCORE(r13)

	/*
	 * All secondaries exiting the guest fall through this path.
	 * Before proceeding, check for an HMI interrupt and, if one is
	 * pending, invoke the OPAL HMI handler.  By now we are sure
	 * that the primary thread on this core/subcore has already done
	 * the partition switch and TB resync, so it is safe to call the
	 * OPAL HMI handler here.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	kvm_no_guest

	li	r3,0			/* NULL argument */
	bl	hmi_exception_realmode
/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to power7_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap.
 */
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	b	power7_wakeup_loss

53:	HMT_LOW
	ld	r5, HSTATE_KVM_VCORE(r13)
	cmpdi	r5, 0
	bne	60f
	ld	r3, HSTATE_SPLIT_MODE(r13)
	cmpdi	r3, 0
	beq	kvm_no_guest
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_unsplit_nap
60:	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/*
 * Here the primary thread is trying to return the core to
 * whole-core mode, so we need to nap.
 */
kvm_unsplit_nap:
	/*
	 * When secondaries are napping in kvm_unsplit_nap() with
	 * hwthread_req = 1, an HMI goes unhandled even though the
	 * subcores have already exited the guest.  The HMI then keeps
	 * waking the secondaries from nap in a loop, and they always go
	 * back to nap since no vcore is assigned to them.  This makes
	 * it impossible for the primary thread to get hold of the
	 * secondary threads, resulting in a soft lockup in the KVM
	 * path.
	 *
	 * Let us check if HMI is pending and handle it before we go to nap.
	 */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	55f
	li	r3, 0			/* NULL argument */
	bl	hmi_exception_realmode
55:
	/*
	 * Ensure that secondary doesn't nap when it has
	 * its vcore pointer set.
	 */
	sync		/* matches smp_mb() before setting split_info.do_nap */
	ld	r0, HSTATE_KVM_VCORE(r13)
	cmpdi	r0, 0
	bne	kvm_no_guest
	/* clear any pending message */
BEGIN_FTR_SECTION
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	/* Set kvm_split_mode.napped[tid] = 1 */
	ld	r3, HSTATE_SPLIT_MODE(r13)
	li	r0, 1
	lhz	r4, PACAPACAINDEX(r13)
	clrldi	r4, r4, 61	/* micro-threading => P8 => 8 threads/core */
	addi	r4, r4, KVM_SPLIT_NAPPED
	stbx	r0, r3, r4
	/* Check the do_nap flag again after setting napped[] */
	sync
	lbz	r0, KVM_SPLIT_DO_NAP(r3)
	cmpwi	r0, 0
	beq	57f
	li	r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
	mtspr	SPRN_LPCR, r4
	isync
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
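	/*
	 * The store/ptesync/load plus the cmpd/bne pair above (a branch
	 * that can never be taken, since r0 is compared with itself)
	 * form the architected nap entry sequence: they guarantee all
	 * prior stores are globally visible before the nap instruction
	 * takes effect.
	 */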

57:	li	r0, 0
	stbx	r0, r3, r4
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif
	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r9, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r9
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r9
	bne	21b
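	/*
	 * The entry/exit word holds the per-thread entry map in its low
	 * byte and the exit map above it, so any value >= 0x100 means
	 * some thread has begun the exit sequence and we must not enter
	 * the guest.
	 */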

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	cmpwi	r6,0
	bne	10f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
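	/*
	 * Running on the reserved LPID while SDR1 is rewritten means any
	 * translation the hardware might fetch mid-switch is never
	 * tagged with the host's or the guest's real LPID.
	 */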

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
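	/*
	 * The tlbiel loop steps through every TLB congruence class:
	 * adding 0x1000 advances the set index by one per iteration, and
	 * IS = 0b10 invalidates the entries in that set which belong to
	 * the current LPID.
	 */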

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
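	/*
	 * TBU40 writes only the upper 40 bits of the timebase; the low
	 * 24 bits keep ticking through the update.  If they wrapped
	 * between the two mftb reads, the addis of 0x100 carries one
	 * into bit 24 before the upper bits are rewritten.
	 */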

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* Mark the subcore state as inside guest */
	bl	kvmppc_subcore_enter_guest
	nop
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r4, HSTATE_KVM_VCPU(r13)
	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */

	/* Do we have a guest vcpu to run? */
10:	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	skip_tm
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm	/* TM not active in guest */

	/*
	 * Make sure the failure summary is set, otherwise we'll program
	 * check when we trechkpt.  It's possible that this might not
	 * have been set on a kvmppc_set_one_reg() call, but we
	 * shouldn't let that crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr
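	/*
	 * The .rept block is an assembler-time loop: "reg" is an
	 * assembler symbol stepped from 0 to 28, so the assembler emits
	 * 29 ld instructions with bare register numbers, loading r0-r28
	 * from the checkpointed-GPR array.
	 */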

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
skip_tm:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r6, VCPU_VTB(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_VTB, r6
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)
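	/*
	 * i.e. DEC = expiry - now, both in guest timebase units: the
	 * stored expiry (a host TB value) gets the vcore's TB offset
	 * added, and the mftb already returns guest time since the
	 * timebase was adjusted at the partition switch.
	 */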

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Secondary threads wait for primary to have done partition switch */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, HSTATE_PTID(r13)
	cmpwi	r6, 0
	beq	21f
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	bne	21f
	HMT_LOW
20:	lwz	r3, VCORE_ENTRY_EXIT(r5)
	cmpwi	r3, 0x100
	bge	no_switch_exit
	lbz	r0, VCORE_IN_GUEST(r5)
	cmpwi	r0, 0
	beq	20b
	HMT_MEDIUM
21:
	/* Set LPCR. */
	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	cmpwi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

	ld	r6, VCPU_CTR(r4)
	ld	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME
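	/*
	 * The rldicl/rotldi pair clears just MSR_HV without needing a
	 * 64-bit mask register: the first rotate brings the HV bit to
	 * the top where the rldicl mask drops it, and the second
	 * completes a full 64-bit rotation, restoring every other bit.
	 * MSR_ME is then forced on, since the guest must always run
	 * with machine check interrupts enabled.
	 */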

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.
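	/*
	 * The hrfid enters the guest at HSRR0 with MSR = HSRR1; the
	 * "b ." is never reached and exists purely defensively, so
	 * execution cannot fall through into the code below.
	 */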

secondary_too_late:
	li	r12, 0
	cmpdi	r4, 0
	beq	11f
	stw	r12, VCPU_TRAP(r4)
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
11:	b	kvmhv_switch_to_host

no_switch_exit:
	HMT_MEDIUM
	li	r12, 0
	b	12f
hdec_soon:
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
12:	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
#endif
	b	guest_exit_cont

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)
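	/*
	 * Interrupts delivered through HSRR0/1 have bit 1 set in the
	 * trap number by the first-level handlers; the clrrdi above
	 * clears that marker again once the hypervisor save/restore
	 * registers have been read.
	 */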

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	std	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Hypervisor doorbell - exit only if host IPI flag set */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DOORBELL
	bne	3f
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	beq	4f
	b	guest_exit_cont
3:
	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Check if any CPU is heading out to the host, if so head out too */
4:	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	mc_cont
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	mr	r3, r12
	/* Increment exit count, poke other threads to exit */
	bl	kvmhv_commence_exit
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	lwz	r12, VCPU_TRAP(r9)

	/* Stop others sending VCPU interrupts to this physical CPU */
	li	r0, -1
	stw	r0, VCPU_CPU(r9)
	stw	r0, VCPU_THREAD_CPU(r9)

	/* Save guest CTRL register, set runlatch to 1 */
	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)
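	/*
	 * Only valid entries (ESID_V set) are read out of the SLB; they
	 * are compacted into the vcpu's SLB array, with the count of
	 * saved entries kept in slb_max, which is what the entry path
	 * uses to reload them.
	 */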

	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r6, SPRN_VTB
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
	/*
	 * Restore various registers to 0, where non-zero values
	 * set by the guest could disrupt the host.
	 */
	li	r0, 0
	mtspr	SPRN_IAMR, r0
	mtspr	SPRN_CIABR, r0
	mtspr	SPRN_DAWRX, r0
	mtspr	SPRN_TCSCR, r0
	mtspr	SPRN_WORT, r0
	/* Set MMCRS to 1<<31 to freeze and disable the SPMC counters */
	li	r0, 1
	sldi	r0, r0, 31
	mtspr	SPRN_MMCRS, r0
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp

Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001471#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1472BEGIN_FTR_SECTION
1473 b 2f
1474END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1475 /* Turn on TM. */
1476 mfmsr r8
1477 li r0, 1
1478 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1479 mtmsrd r8
1480
1481 ld r5, VCPU_MSR(r9)
1482 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
1483 beq 1f /* TM not active in guest. */

	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
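	/*
	 * The .rept block above expands to straight-line stores,
	 * e.g. std r0, VCPU_GPRS_TM(0)(r9); std r1, VCPU_GPRS_TM(1)(r9);
	 * and so on for r0-r28, skipping r9 and r13, which are still
	 * stashed in PACATMSCRATCH and SCRATCH0 and are saved next.
	 */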
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR. Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
2:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
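	/*
	 * li -1 then clrrdi ,10 gives 0xfffffffffffffc00: the top 54
	 * bits (9 freeze condition bits for each of the 6 counters)
	 * are all 1s and the low 10 bits are clear.
	 */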
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync
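	/*
	 * Note: slbia is not guaranteed to invalidate SLB entry 0, so
	 * the all-zero slbmte above is what clears that entry; the
	 * ptesync makes the invalidations visible before we switch
	 * partitions below.
	 */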

	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	rlwinm	r0,r3,32-8,0xff
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync
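	/*
	 * VCORE_ENTRY_EXIT holds two thread bitmaps: the low byte is
	 * the set of threads that entered the guest, the next byte the
	 * set that have exited.  The rlwinm/clrldi pair above extracts
	 * the two maps, and the primary spins until they match, i.e.
	 * until every thread that went into the guest has come back out.
	 */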

	/* Did we actually switch to the guest at all? */
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	19f

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	mfspr	r7, SPRN_DPDES
	std	r7, VCORE_DPDES(r5)
	/* clear DPDES so we don't get guest doorbells in the host */
	li	r8, 0
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	/* If HMI, call kvmppc_realmode_hmi_handler() */
	cmpwi	r12, BOOK3S_INTERRUPT_HMI
	bne	27f
	bl	kvmppc_realmode_hmi_handler
	nop
	li	r12, BOOK3S_INTERRUPT_HMI
	/*
	 * At this point kvmppc_realmode_hmi_handler would have resynced
	 * the TB.  Hence it is not required to subtract guest timebase
	 * offset from timebase.  So, skip it.
	 *
	 * Also, do not call kvmppc_subcore_exit_guest() because it has
	 * been invoked as part of kvmppc_realmode_hmi_handler().
	 */
	b	30f

27:
	/* Subtract timebase offset from timebase */
	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	17f
	mftb	r6			/* current guest timebase */
	subf	r8,r8,r6
	mtspr	SPRN_TBU40,r8		/* update upper 40 bits */
	mftb	r7			/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	17f
	addis	r8,r8,0x100		/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8
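	/*
	 * mtspr TBU40 writes only the upper 40 bits of the timebase and
	 * leaves the low 24 bits running.  If the low 24 bits wrapped
	 * between the two mftb reads, addis r8,r8,0x100 adds 1 << 24,
	 * i.e. increments the least significant bit of the 40-bit field.
	 */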

17:	bl	kvmppc_subcore_exit_guest
	nop
30:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */

	/* Reset PCR */
	ld	r0, VCORE_PCR(r5)
	cmpdi	r0, 0
	beq	18f
	li	r0, 0
	mtspr	SPRN_PCR, r0
18:
	/* Signal secondary CPUs to continue */
	stb	r0,VCORE_IN_GUEST(r5)
19:	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync

	/* load host SLB entries */
	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	li	r3, SLBSHADOW_SAVEAREA
	LDX_BE	r5, r8, r3
	addi	r3, r3, 8
	LDX_BE	r6, r8, r3
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Finish timing, if we have a vcpu */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	li	r3, 0
	beq	2f
	bl	kvmhv_accumulate_time
2:
#endif
	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	ld	r0, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	mtlr	r0
	blr

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_DATA_SEGMENT
	bne	7f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI (or DSegI) for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	li	r0, BOOK3S_INTERRUPT_DATA_STORAGE
	mtspr	SPRN_DSISR, r6
7:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	ld	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_HOST_HV
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	li	r0, BOOK3S_INTERRUPT_INST_SEGMENT
	bne	7f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI (or ISegI) for the guest */
	mr	r11, r3
1:	li	r0, BOOK3S_INTERRUPT_INST_STORAGE
7:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	mr	r10, r0
	bl	kvmppc_msr_interrupt
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 *
 * r5 - r8 contain hcall args,
 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
 */
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	/* sc 1 from userspace - reflect to guest syscall */
	bne	sc_1_fast_return
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	/* See if this hcall is enabled for in-kernel handling */
	ld	r4, VCPU_KVM(r9)
	srdi	r0, r3, 8	/* r0 = (r3 / 4) >> 6 */
	sldi	r0, r0, 3	/* index into kvm->arch.enabled_hcalls[] */
	add	r4, r4, r0
	ld	r0, KVM_ENABLED_HCALLS(r4)
	rlwinm	r4, r3, 32-2, 0x3f	/* r4 = (r3 / 4) & 0x3f */
	srd	r0, r0, r4
	andi.	r0, r0, 1
	beq	guest_exit_cont
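	/*
	 * Worked example: H_ENTER is hcall 0x08, so the check above
	 * loads dword 0 of kvm->arch.enabled_hcalls and tests bit 2
	 * (0x08 / 4); only if that bit is set is the real-mode handler
	 * used, otherwise the hcall goes up to the host kernel.
	 */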
	/* Get pointer to handler, if any, and call it */
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwax	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r12,r3,r4
	mtctr	r12
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return

sc_1_fast_return:
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10, BOOK3S_INTERRUPT_SYSCALL
	bl	kvmppc_msr_interrupt
	mr	r4,r9
	b	fast_guest_return

	/* We've attempted a real mode hcall, but it's been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	DOTSYM(kvmppc_h_remove) - hcall_real_table
	.long	DOTSYM(kvmppc_h_enter) - hcall_real_table
	.long	DOTSYM(kvmppc_h_read) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
	.long	DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
	.long	DOTSYM(kvmppc_h_protect) - hcall_real_table
	.long	DOTSYM(kvmppc_h_get_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce) - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
#ifdef CONFIG_KVM_XICS
	.long	DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
	.long	0		/* 0x70 - H_IPOLL */
	.long	DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
#else
	.long	0		/* 0x64 - H_EOI */
	.long	0		/* 0x68 - H_CPPR */
	.long	0		/* 0x6c - H_IPI */
	.long	0		/* 0x70 - H_IPOLL */
	.long	0		/* 0x74 - H_XIRR */
#endif
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	DOTSYM(kvmppc_h_cede) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
	.long	0		/* 0x128 */
	.long	0		/* 0x12c */
	.long	0		/* 0x130 */
	.long	DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_stuff_tce) - hcall_real_table
	.long	DOTSYM(kvmppc_rm_h_put_tce_indirect) - hcall_real_table
	.long	0		/* 0x140 */
	.long	0		/* 0x144 */
	.long	0		/* 0x148 */
	.long	0		/* 0x14c */
	.long	0		/* 0x150 */
	.long	0		/* 0x154 */
	.long	0		/* 0x158 */
	.long	0		/* 0x15c */
	.long	0		/* 0x160 */
	.long	0		/* 0x164 */
	.long	0		/* 0x168 */
	.long	0		/* 0x16c */
	.long	0		/* 0x170 */
	.long	0		/* 0x174 */
	.long	0		/* 0x178 */
	.long	0		/* 0x17c */
	.long	0		/* 0x180 */
	.long	0		/* 0x184 */
	.long	0		/* 0x188 */
	.long	0		/* 0x18c */
	.long	0		/* 0x190 */
	.long	0		/* 0x194 */
	.long	0		/* 0x198 */
	.long	0		/* 0x19c */
	.long	0		/* 0x1a0 */
	.long	0		/* 0x1a4 */
	.long	0		/* 0x1a8 */
	.long	0		/* 0x1ac */
	.long	0		/* 0x1b0 */
	.long	0		/* 0x1b4 */
	.long	0		/* 0x1b8 */
	.long	0		/* 0x1bc */
	.long	0		/* 0x1c0 */
	.long	0		/* 0x1c4 */
	.long	0		/* 0x1c8 */
	.long	0		/* 0x1cc */
	.long	0		/* 0x1d0 */
	.long	0		/* 0x1d4 */
	.long	0		/* 0x1d8 */
	.long	0		/* 0x1dc */
	.long	0		/* 0x1e0 */
	.long	0		/* 0x1e4 */
	.long	0		/* 0x1e8 */
	.long	0		/* 0x1ec */
	.long	0		/* 0x1f0 */
	.long	0		/* 0x1f4 */
	.long	0		/* 0x1f8 */
	.long	0		/* 0x1fc */
	.long	0		/* 0x200 */
	.long	0		/* 0x204 */
	.long	0		/* 0x208 */
	.long	0		/* 0x20c */
	.long	0		/* 0x210 */
	.long	0		/* 0x214 */
	.long	0		/* 0x218 */
	.long	0		/* 0x21c */
	.long	0		/* 0x220 */
	.long	0		/* 0x224 */
	.long	0		/* 0x228 */
	.long	0		/* 0x22c */
	.long	0		/* 0x230 */
	.long	0		/* 0x234 */
	.long	0		/* 0x238 */
	.long	0		/* 0x23c */
	.long	0		/* 0x240 */
	.long	0		/* 0x244 */
	.long	0		/* 0x248 */
	.long	0		/* 0x24c */
	.long	0		/* 0x250 */
	.long	0		/* 0x254 */
	.long	0		/* 0x258 */
	.long	0		/* 0x25c */
	.long	0		/* 0x260 */
	.long	0		/* 0x264 */
	.long	0		/* 0x268 */
	.long	0		/* 0x26c */
	.long	0		/* 0x270 */
	.long	0		/* 0x274 */
	.long	0		/* 0x278 */
	.long	0		/* 0x27c */
	.long	0		/* 0x280 */
	.long	0		/* 0x284 */
	.long	0		/* 0x288 */
	.long	0		/* 0x28c */
	.long	0		/* 0x290 */
	.long	0		/* 0x294 */
	.long	0		/* 0x298 */
	.long	0		/* 0x29c */
	.long	0		/* 0x2a0 */
	.long	0		/* 0x2a4 */
	.long	0		/* 0x2a8 */
	.long	0		/* 0x2ac */
	.long	0		/* 0x2b0 */
	.long	0		/* 0x2b4 */
	.long	0		/* 0x2b8 */
	.long	0		/* 0x2bc */
	.long	0		/* 0x2c0 */
	.long	0		/* 0x2c4 */
	.long	0		/* 0x2c8 */
	.long	0		/* 0x2cc */
	.long	0		/* 0x2d0 */
	.long	0		/* 0x2d4 */
	.long	0		/* 0x2d8 */
	.long	0		/* 0x2dc */
	.long	0		/* 0x2e0 */
	.long	0		/* 0x2e4 */
	.long	0		/* 0x2e8 */
	.long	0		/* 0x2ec */
	.long	0		/* 0x2f0 */
	.long	0		/* 0x2f4 */
	.long	0		/* 0x2f8 */
	.long	0		/* 0x2fc */
	.long	DOTSYM(kvmppc_h_random) - hcall_real_table
	.globl	hcall_real_table_end
hcall_real_table_end:

_GLOBAL(kvmppc_h_set_xdabr)
	andi.	r0, r5, DABRX_USER | DABRX_KERNEL
	beq	6f
	li	r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
	andc.	r0, r5, r0
	beq	3f
6:	li	r3, H_PARAMETER
	blr

_GLOBAL(kvmppc_h_set_dabr)
	li	r5, DABRX_USER | DABRX_KERNEL
3:
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r4,VCPU_DABR(r3)
	stw	r5, VCPU_DABRX(r3)
	mtspr	SPRN_DABRX, r5
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

	/* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
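	/*
	 * The three low-order DABR flag bits are transcribed into DAWRX:
	 * DR (0x1) and DW (0x2) rotate left 5 into DAWRX_DR/DAWRX_DW,
	 * and the translation bit BT (0x4) rotates left 2 into DAWRX_WT.
	 * Clearing the low 3 bits of r4 leaves the doubleword-aligned
	 * DAWR address.
	 */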
2:	rlwimi	r5, r4, 5, DAWRX_DR | DAWRX_DW
	rlwimi	r5, r4, 2, DAWRX_WT
	clrrdi	r4, r4, 3
	std	r4, VCPU_DAWR(r3)
	std	r5, VCPU_DAWRX(r3)
	mtspr	SPRN_DAWR, r4
	mtspr	SPRN_DAWRX, r5
	li	r3, 0
	blr

_GLOBAL(kvmppc_h_cede)		/* r3 = vcpu pointer, r11 = msr, r13 = paca */
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r12,0		/* set trap to 0 to say hcall is handled */
	stw	r12,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r6,HSTATE_PTID(r13)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
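	/*
	 * The lwarx/stwcx. loop below atomically ORs our thread bit
	 * into napping_threads; if the result equals the entry map
	 * (every thread that entered the guest is now ceded), the last
	 * ceder exits to the host instead of napping.
	 */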
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	cmpw	r4,r8
	beq	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r0,NAPPING_CEDE
	stb	r0,HSTATE_NAPPING(r13)
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	kvmppc_save_fp

	/*
	 * Set DEC to the smaller of DEC and HDEC, so that we wake
	 * no later than the end of our timeslice (HDEC interrupts
	 * don't wake us from nap).
	 */
	mfspr	r3, SPRN_DEC
	mfspr	r4, SPRN_HDEC
	mftb	r5
	cmpw	r3, r4
	ble	67f
	mtspr	SPRN_DEC, r4
67:
	/* save expiry time of guest decrementer */
	extsw	r3, r3
	add	r3, r3, r5
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	subf	r3, r6, r3	/* convert to host TB value */
	std	r3, VCPU_DEC_EXPIRES(r4)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	ld	r4, HSTATE_KVM_VCPU(r13)
	addi	r3, r4, VCPU_TB_CEDE
	bl	kvmhv_accumulate_time
#endif

	lis	r3, LPCR_PECEDP@h	/* Do wake on privileged doorbell */

	/*
	 * Take a nap until a decrementer or external or doorbell interrupt
	 * occurs, with PECE1 and PECE0 set in LPCR.
	 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
	 * Also clear the runlatch bit before napping.
	 */
kvm_do_nap:
	mfspr	r0, SPRN_CTRLF
	clrrdi	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
BEGIN_FTR_SECTION
	ori	r5, r5, LPCR_PECEDH
	rlwimi	r5, r3, 0, LPCR_PECEDP
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_LPCR,r5
	isync
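	/*
	 * The store/ptesync/reload/compare sequence below is the
	 * architected recipe for entering a power-saving state: the
	 * compare depends on the reload, which cannot complete until
	 * the store has performed, so all prior stores are guaranteed
	 * visible before the nap.  cmpd r0,r0 always compares equal,
	 * so the bne never loops.
	 */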
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

33:	mr	r4, r3
	li	r3, 0
	li	r12, 0
	b	34f

kvm_end_cede:
	/* get vcpu pointer */
	ld	r4, HSTATE_KVM_VCPU(r13)

	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMINTR
	bl	kvmhv_accumulate_time
#endif

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Restore guest decrementer */
	ld	r3, VCPU_DEC_EXPIRES(r4)
	ld	r5, HSTATE_KVM_VCORE(r13)
	ld	r6, VCORE_TB_OFFSET(r5)
	add	r3, r3, r6	/* convert host TB to guest TB value */
	mftb	r7
	subf	r3, r7, r3
	mtspr	SPRN_DEC, r3

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason

	/* clear our bit in vcore->napping_threads */
34:	ld	r5,HSTATE_KVM_VCORE(r13)
	lbz	r7,HSTATE_PTID(r13)
	li	r0,1
	sld	r0,r0,r7
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* See if the wake reason means we need to exit */
	stw	r12, VCPU_TRAP(r4)
	mr	r9, r4
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	bge	guest_exit_cont

	b	kvmppc_cede_reentry	/* if not go back to guest */

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	ld	r9, HSTATE_KVM_VCPU(r13)
	b	guest_exit_cont

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	kvmppc_realmode_machine_check
	nop
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	/*
	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest
	 * through a machine check interrupt (set HSRR0 to 0x200).
	 * For handled (non-fatal) errors, just go back to guest
	 * execution with the current HSRR0 instead of exiting the
	 * guest.  This approach injects the machine check into the
	 * guest for fatal errors, causing the guest to crash.
	 *
	 * The old code returned to the host for unhandled errors,
	 * which caused the guest to hang with soft lockups and made
	 * it difficult to recover the guest instance.
	 *
	 * If we receive a machine check with MSR(RI=0), deliver it to
	 * the guest as a machine check, causing the guest to crash.
	 */
	ld	r11, VCPU_MSR(r9)
	rldicl.	r0, r11, 64-MSR_HV_LG, 63 /* check if it happened in HV mode */
	bne	mc_cont			/* if so, exit to host */
	andi.	r10, r11, MSR_RI	/* check for unrecoverable exception */
	beq	1f			/* Deliver a machine check to guest */
	ld	r10, VCPU_PC(r9)
	cmpdi	r3, 0		/* Did we handle MCE ? */
	bne	2f		/* Continue guest execution. */
	/* If not, deliver a machine check.  SRR0/1 are already set */
1:	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	bl	kvmppc_msr_interrupt
2:	b	fast_interrupt_c_return

/*
 * Check the reason we woke from nap, and take appropriate action.
 * Returns (in r3):
 *	0 if nothing needs to be done
 *	1 if something happened that needs to be handled by the host
 *	-1 if there was a guest wakeup (IPI or msgsnd)
 *
 * Also sets r12 to the interrupt vector for any interrupt that needs
 * to be handled now by the host (0x500 for external interrupt), or zero.
 * Modifies r0, r6, r7, r8.
 */
kvmppc_check_wake_reason:
	mfspr	r6, SPRN_SRR1
BEGIN_FTR_SECTION
	rlwinm	r6, r6, 45-31, 0xf	/* extract wake reason field (P8) */
FTR_SECTION_ELSE
	rlwinm	r6, r6, 45-31, 0xe	/* P7 wake reason field is 3 bits */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
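	/*
	 * SRR1 wake reason values tested below: 8 = external interrupt,
	 * 6 = decrementer, 5 = privileged doorbell (P8), 3 = hypervisor
	 * doorbell (P8), 0xa = hypervisor maintenance interrupt.
	 */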
	cmpwi	r6, 8			/* was it an external interrupt? */
	li	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	kvmppc_read_intr	/* if so, see what it was */
	li	r3, 0
	li	r12, 0
	cmpwi	r6, 6			/* was it the decrementer? */
	beq	0f
BEGIN_FTR_SECTION
	cmpwi	r6, 5			/* privileged doorbell? */
	beq	0f
	cmpwi	r6, 3			/* hypervisor doorbell? */
	beq	3f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	cmpwi	r6, 0xa			/* Hypervisor maintenance? */
	beq	4f
	li	r3, 1			/* anything else, return 1 */
0:	blr

	/* hypervisor doorbell */
3:	li	r12, BOOK3S_INTERRUPT_H_DOORBELL

	/*
	 * Clear the doorbell as we will invoke the handler
	 * explicitly in the guest exit path.
	 */
	lis	r6, (PPC_DBELL_SERVER << (63-36))@h
	PPC_MSGCLR(6)
	/* see if it's a host IPI */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bnelr
	/* if not, return -1 */
	li	r3, -1
	blr

	/* Woken up due to Hypervisor maintenance interrupt */
4:	li	r12, BOOK3S_INTERRUPT_HMI
	li	r3, 1
	blr

/*
 * Determine what sort of external interrupt is pending (if any).
 * Returns:
 *	0 if no interrupt is pending
 *	1 if an interrupt is pending that needs to be handled by the host
 *	-1 if there was a guest wakeup IPI (which has now been cleared)
 * Modifies r0, r6, r7, r8, returns value in r3.
 */
kvmppc_read_intr:
	/* see if a host IPI is pending */
	li	r3, 1
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	1f

	/* Now read the interrupt from the ICP */
	ld	r6, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r6, 0
	beq-	1f
	lwzcix	r0, r6, r7
	/*
	 * Save XIRR for later.  Since we get it in reverse endian on LE
	 * systems, save it byte reversed and fetch it back in host endian.
	 */
	li	r3, HSTATE_SAVED_XIRR
	STWX_BE	r0, r3, r13
#ifdef __LITTLE_ENDIAN__
	lwz	r3, HSTATE_SAVED_XIRR(r13)
#else
	mr	r3, r0
#endif
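	/*
	 * XIRR is CPPR in the top byte and the 24-bit XISR below it;
	 * masking with 0xffffff isolates the XISR, and a zero XISR
	 * means nothing is pending in the ICP.
	 */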
	rlwinm.	r3, r3, 0, 0xffffff
	sync
	beq	1f			/* if nothing pending in the ICP */

	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r3, XICS_IPI		/* if there is, is it an IPI? */
	bne	42f

	/* It's an IPI, clear the MFRR and EOI it */
	li	r3, 0xff
	li	r8, XICS_MFRR
	stbcix	r3, r6, r8		/* clear the IPI */
	stwcix	r0, r6, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime.  If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	43f

	/* OK, it's an IPI for us */
	li	r12, 0
	li	r3, -1
1:	blr

42:	/* It's not an IPI and it's for the host.  We saved a copy of XIRR in
	 * the PACA earlier, it will be picked up by the host ICP driver
	 */
	li	r3, 1
	b	1b

43:	/* We raced with the host, we need to resend that IPI, bummer */
	li	r0, IPI_PRIORITY
	stbcix	r0, r6, r8		/* set the IPI */
	sync
	li	r3, 1
	b	1b

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_save_fp:
	mflr	r30
	mr	r31,r3
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r3,VCPU_FPRS
	bl	store_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	store_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r31)
	mtlr	r30
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 * N.B. r30 and r31 are volatile across this function,
 * thus it is not callable from C.
 */
kvmppc_load_fp:
	mflr	r30
	mr	r31,r4
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	addi	r3,r4,VCPU_FPRS
	bl	load_fp_state
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	addi	r3,r31,VCPU_VRS
	bl	load_vr_state
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r31)
	mtspr	SPRN_VRSAVE,r7
	mtlr	r30
	mr	r4,r31
	blr

/*
 * We come here if we get any exception or interrupt while we are
 * executing host real mode code while in guest MMU context.
 * For now just spin, but we should do something better.
 */
kvmppc_bad_host_intr:
	b	.

/*
 * This mimics the MSR transition on IRQ delivery.  The new guest MSR is taken
 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
 * r11 has the guest MSR value (in/out)
 * r9 has a vcpu pointer (in)
 * r0 is used as a scratch register
 */
kvmppc_msr_interrupt:
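	/*
	 * MSR[TS] encodes 0b10 = transactional, 0b01 = suspended.  If
	 * the guest was transactional, force TS to suspended in the new
	 * MSR; otherwise re-insert the old TS value unchanged.
	 */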
	rldicl	r0, r11, 64 - MSR_TS_S_LG, 62
	cmpwi	r0, 2			/* Check if we are in transactional state. */
	ld	r11, VCPU_INTR_MSR(r9)
	bne	1f
	/* ... if transactional, change to suspended */
	li	r0, 1
1:	rldimi	r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
	blr

/*
 * This works around a hardware bug on POWER8E processors, where
 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
 * performance monitor interrupt.  Instead, when we need to have
 * an interrupt pending, we have to arrange for a counter to overflow.
 */
kvmppc_fix_pmao:
	li	r3, 0
	mtspr	SPRN_MMCR2, r3
	lis	r3, (MMCR0_PMXE | MMCR0_FCECE)@h
	ori	r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
	mtspr	SPRN_MMCR0, r3
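	/*
	 * Preload PMC6 with 0x7fffffff, one below the 2^31 overflow
	 * threshold, so the next counted event overflows the counter
	 * and raises the desired performance monitor interrupt.
	 */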
	lis	r3, 0x7fff
	ori	r3, r3, 0xffff
	mtspr	SPRN_PMC6, r3
	isync
	blr

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
/*
 * Start timing an activity
 * r3 = pointer to time accumulation struct, r4 = vcpu
 */
kvmhv_start_timing:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r6, VCORE_IN_GUEST(r5)
	cmpwi	r6, 0
	beq	5f			/* if in guest, need to */
	ld	r6, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
5:	mftb	r5
	subf	r5, r6, r5
	std	r3, VCPU_CUR_ACTIVITY(r4)
	std	r5, VCPU_ACTIVITY_START(r4)
	blr

/*
 * Accumulate time to one activity and start another.
 * r3 = pointer to new time accumulation struct, r4 = vcpu
 */
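/*
 * TAS_SEQCOUNT is used seqlock-style: it goes odd before the stats
 * are updated and even again afterwards, so a reader seeing an odd
 * or changed count retries.  A count of zero also doubles as an
 * "uninitialized" flag: the first sample seeds both TAS_MIN and
 * TAS_MAX (the beq 3f below reuses CR0 from the cmpdi on the count).
 */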
kvmhv_accumulate_time:
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r8, VCORE_IN_GUEST(r5)
	cmpwi	r8, 0
	beq	4f			/* if in guest, need to */
	ld	r8, VCORE_TB_OFFSET(r5)	/* subtract timebase offset */
4:	ld	r5, VCPU_CUR_ACTIVITY(r4)
	ld	r6, VCPU_ACTIVITY_START(r4)
	std	r3, VCPU_CUR_ACTIVITY(r4)
	mftb	r7
	subf	r7, r8, r7
	std	r7, VCPU_ACTIVITY_START(r4)
	cmpdi	r5, 0
	beqlr
	subf	r3, r6, r7
	ld	r8, TAS_SEQCOUNT(r5)
	cmpdi	r8, 0
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	lwsync
	ld	r7, TAS_TOTAL(r5)
	add	r7, r7, r3
	std	r7, TAS_TOTAL(r5)
	ld	r6, TAS_MIN(r5)
	ld	r7, TAS_MAX(r5)
	beq	3f
	cmpd	r3, r6
	bge	1f
3:	std	r3, TAS_MIN(r5)
1:	cmpd	r3, r7
	ble	2f
	std	r3, TAS_MAX(r5)
2:	lwsync
	addi	r8, r8, 1
	std	r8, TAS_SEQCOUNT(r5)
	blr
#endif