/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#include <asm/tm.h>

#define VCPU_GPRS_TM(reg) (((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
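	/* r5 = real-mode entry point, r6 = MSR with IR/DR clear; the RFI
	 * below switches translation off as it branches to kvmppc_call_hv_entry */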
	mtsrr0	r5
	mtsrr1	r6
	RFI

kvmppc_call_hv_entry:
	ld	r4, HSTATE_KVM_VCPU(r13)
	bl	kvmppc_hv_entry

	/* Back from guest - restore host state and return to caller */

BEGIN_FTR_SECTION
	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG_VDSO(r13)
	mtspr	SPRN_SPRG_VDSO_WRITE,r3

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
BEGIN_FTR_SECTION
	ld	r3, HSTATE_MMCR0(r13)
	andi.	r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r4, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, HSTATE_PMC1(r13)
	lwz	r4, HSTATE_PMC2(r13)
	lwz	r5, HSTATE_PMC3(r13)
	lwz	r6, HSTATE_PMC4(r13)
	lwz	r8, HSTATE_PMC5(r13)
	lwz	r9, HSTATE_PMC6(r13)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, HSTATE_MMCR0(r13)
	ld	r4, HSTATE_MMCR1(r13)
	ld	r5, HSTATE_MMCRA(r13)
	ld	r6, HSTATE_SIAR(r13)
	ld	r7, HSTATE_SDAR(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_SIAR, r6
	mtspr	SPRN_SDAR, r7
BEGIN_FTR_SECTION
	ld	r8, HSTATE_MMCR2(r13)
	ld	r9, HSTATE_SIER(r13)
	mtspr	SPRN_MMCR2, r8
	mtspr	SPRN_SIER, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
23:

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, 112+PPC_LR_STKOFF(r1)
	addi	r1, r1, 112
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	beq	11f
	cmpwi	cr2, r12, BOOK3S_INTERRUPT_HMI
	beq	cr2, 14f			/* HMI check */

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

14:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	b	hmi_exception_after_realmode

kvmppc_primary_no_guest:
	/* We handle this much like a ceded vcpu */
	/* put the HDEC into the DEC, since HDEC interrupts don't wake us */
	mfspr	r3, SPRN_HDEC
	mtspr	SPRN_DEC, r3
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
	/* order napping_threads update vs testing entry_exit_map */
	isync
	li	r12, 0
	lwz	r7, VCORE_ENTRY_EXIT(r5)
	cmpwi	r7, 0x100
	bge	kvm_novcpu_exit	/* another thread already exiting */
	li	r3, NAPPING_NOVCPU
	stb	r3, HSTATE_NAPPING(r13)

	li	r3, 0		/* Don't wake on privileged (OS) doorbell */
	b	kvm_do_nap
kvm_novcpu_wakeup:
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r0, 0
	stb	r0, HSTATE_NAPPING(r13)
	stb	r0, HSTATE_HWTHREAD_REQ(r13)

	/* check the wake reason */
	bl	kvmppc_check_wake_reason

	/* see if any other thread is already exiting */
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	bge	kvm_novcpu_exit

	/* clear our bit in napping_threads */
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
4:	lwarx	r7, 0, r6
	andc	r7, r7, r0
	stwcx.	r7, 0, r6
	bne	4b

	/* See if the wake reason means we need to exit */
	cmpdi	r3, 0
	bge	kvm_novcpu_exit

	/* See if our timeslice has expired (HDEC is negative) */
	mfspr	r0, SPRN_HDEC
	li	r12, BOOK3S_INTERRUPT_HV_DECREMENTER
	cmpwi	r0, 0
	blt	kvm_novcpu_exit

	/* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
#endif
	b	kvmppc_got_guest

kvm_novcpu_exit:
	b	hdec_soon

/*
 * We come in here when wakened from nap mode.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:

	/* Set runlatch bit the minute you wake up from nap */
	mfspr	r0, SPRN_CTRLF
	ori	r0, r0, 1
	mtspr	SPRN_CTRLT, r0

	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,NAPPING_CEDE
	beq	kvm_end_cede
	cmpwi	r0,NAPPING_NOVCPU
	beq	kvm_novcpu_wakeup

	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD

	/*
	 * We weren't napping due to cede, so this must be a secondary
	 * thread being woken up to run a guest, or being woken up due
	 * to a stray IPI.  (Or due to some machine check or hypervisor
	 * maintenance interrupt while the core is in KVM.)
	 */

	/* Check the wake reason in SRR1 to see why we got here */
	bl	kvmppc_check_wake_reason
	cmpdi	r3, 0
	bge	kvm_no_guest

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

kvm_secondary_got_guest:

	/* Set HSTATE_DSCR(r13) to something sensible */
	ld	r6, PACA_DSCR(r13)
	std	r6, HSTATE_DSCR(r13)

	/* Order load of vcore, ptid etc. after load of vcpu */
	lwsync
	bl	kvmppc_hv_entry

	/* Back from the guest, go back to nap */
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	/*
	 * Once we clear HSTATE_KVM_VCPU(r13), the code in
	 * kvmppc_run_core() is going to assume that all our vcpu
	 * state is visible in memory.  This lwsync makes sure
	 * that that is true.
	 */
	lwsync
	std	r0, HSTATE_KVM_VCPU(r13)

/*
 * At this point we have finished executing in the guest.
 * We need to wait for hwthread_req to become zero, since
 * we may not turn on the MMU while hwthread_req is non-zero.
 * While waiting we also need to check if we get given a vcpu to run.
 */
kvm_no_guest:
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	53f
	HMT_MEDIUM
	li	r0, KVM_HWTHREAD_IN_KERNEL
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	/* need to recheck hwthread_req after a barrier, to avoid race */
	sync
	lbz	r3, HSTATE_HWTHREAD_REQ(r13)
	cmpwi	r3, 0
	bne	54f
/*
 * We jump to power7_wakeup_loss, which will return to the caller
 * of power7_nap in the powernv cpu offline loop.  The value we
 * put in r3 becomes the return value for power7_nap.
 */
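	/* allow wakeup on external interrupts (PECE0) but not on the decrementer (PECE1) */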
	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	li	r3, 0
	b	power7_wakeup_loss

53:	HMT_LOW
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	kvm_no_guest
	HMT_MEDIUM
	b	kvm_secondary_got_guest

54:	li	r0, KVM_HWTHREAD_IN_KVM
	stb	r0, HSTATE_HWTHREAD_STATE(r13)
	b	kvm_no_guest

/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer (or NULL)
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * R2 = TOC
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	li	r6, KVM_GUEST_MODE_HOST_HV
	stb	r6, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Store initial timestamp */
	cmpdi	r4, 0
	beq	1f
	addi	r3, r4, VCPU_TB_RMENTRY
	bl	kvmhv_start_timing
1:
#endif
	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r9, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r9
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r9
	bne	21b

	/* Primary thread switches to guest partition. */
	ld	r9,VCORE_KVM(r5)	/* pointer to struct kvm */
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

	/* Load guest PCR value to select appropriate compat mode */
37:	ld	r7, VCORE_PCR(r5)
	cmpdi	r7, 0
	beq	38f
	mtspr	SPRN_PCR, r7
38:

BEGIN_FTR_SECTION
	/* DPDES is shared between threads */
	ld	r8, VCORE_DPDES(r5)
	mtspr	SPRN_DPDES, r8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)

	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR. */
10:	ld	r8,VCORE_LPCR(r5)
	mtspr	SPRN_LPCR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,512		/* 1 microsecond */
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	blt	hdec_soon

	/* Do we have a guest vcpu to run? */
	cmpdi	r4, 0
	beq	kvmppc_primary_no_guest
kvmppc_got_guest:

	/* Load up guest SLB entries */
	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8

BEGIN_FTR_SECTION
	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	lwz	r5,VCPU_DABRX(r4)
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
	isync
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	skip_tm
END_FTR_SECTION_IFCLR(CPU_FTR_TM)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

	ld	r5, VCPU_MSR(r4)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	skip_tm	/* TM not active in guest */
	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7
	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	29, VCPU_GPRS_TM(29)(r31)
	ld	30, VCPU_GPRS_TM(30)(r31)
	ld	31, VCPU_GPRS_TM(31)(r31)

	/* TM checkpointed state is now setup.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
skip_tm:
#endif

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
BEGIN_FTR_SECTION
	ld	r3, VCPU_MMCR(r4)
	andi.	r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
	cmpwi	r5, MMCR0_PMAO
	beql	kvmppc_fix_pmao
END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	ld	r7, VCPU_SIAR(r4)
	ld	r8, VCPU_SDAR(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_SIAR, r7
	mtspr	SPRN_SDAR, r8
BEGIN_FTR_SECTION
	ld	r5, VCPU_MMCR + 24(r4)
	ld	r6, VCPU_SIER(r4)
	lwz	r7, VCPU_PMC + 24(r4)
	lwz	r8, VCPU_PMC + 28(r4)
	ld	r9, VCPU_MMCR + 32(r4)
	mtspr	SPRN_MMCR2, r5
	mtspr	SPRN_SIER, r6
	mtspr	SPRN_SPMC1, r7
	mtspr	SPRN_SPMC2, r8
	mtspr	SPRN_MMCRS, r9
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	mtspr	SPRN_MMCR0, r3
	isync
	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5

BEGIN_FTR_SECTION
	/* Skip next section on POWER7 */
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	/* Load up POWER8-specific registers */
	ld	r5, VCPU_IAMR(r4)
	lwz	r6, VCPU_PSPB(r4)
	ld	r7, VCPU_FSCR(r4)
	mtspr	SPRN_IAMR, r5
	mtspr	SPRN_PSPB, r6
	mtspr	SPRN_FSCR, r7
	ld	r5, VCPU_DAWR(r4)
	ld	r6, VCPU_DAWRX(r4)
	ld	r7, VCPU_CIABR(r4)
	ld	r8, VCPU_TAR(r4)
	mtspr	SPRN_DAWR, r5
	mtspr	SPRN_DAWRX, r6
	mtspr	SPRN_CIABR, r7
	mtspr	SPRN_TAR, r8
	ld	r5, VCPU_IC(r4)
	ld	r6, VCPU_VTB(r4)
	mtspr	SPRN_IC, r5
	mtspr	SPRN_VTB, r6
	ld	r8, VCPU_EBBHR(r4)
	mtspr	SPRN_EBBHR, r8
	ld	r5, VCPU_EBBRR(r4)
	ld	r6, VCPU_BESCR(r4)
	ld	r7, VCPU_CSIGR(r4)
	ld	r8, VCPU_TACR(r4)
	mtspr	SPRN_EBBRR, r5
	mtspr	SPRN_BESCR, r6
	mtspr	SPRN_CSIGR, r7
	mtspr	SPRN_TACR, r8
	ld	r5, VCPU_TCSCR(r4)
	ld	r6, VCPU_ACOP(r4)
	lwz	r7, VCPU_GUEST_PID(r4)
	ld	r8, VCPU_WORT(r4)
	mtspr	SPRN_TCSCR, r5
	mtspr	SPRN_ACOP, r6
	mtspr	SPRN_PID, r7
	mtspr	SPRN_WORT, r8
8:

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7

deliver_guest_interrupt:
	/* r11 = vcpu->arch.msr & ~MSR_HV */
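	/* rotate the HV bit up to the MSB, mask it off, then rotate back */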
	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0, VCPU_PENDING_EXC(r4)
	rldicl	r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
	cmpdi	cr1, r0, 0
	andi.	r8, r11, MSR_EE
	mfspr	r8, SPRN_LPCR
	/* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
	rldimi	r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
	mtspr	SPRN_LPCR, r8
	isync
	beq	5f
	li	r0, BOOK3S_INTERRUPT_EXTERNAL
	bne	cr1, 12f
	mfspr	r0, SPRN_DEC
	cmpwi	r0, 0
	li	r0, BOOK3S_INTERRUPT_DECREMENTER
	bge	5f

12:	mtspr	SPRN_SRR0, r10
	mr	r10,r0
	mtspr	SPRN_SRR1, r11
	mr	r9, r4
	bl	kvmppc_msr_interrupt
5:

/*
 * Required state:
 * R4 = vcpu
 * R10: value for HSRR0
 * R11: value for HSRR1
 * R13 = PACA
 */
fast_guest_return:
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	/* Accumulate timing */
	addi	r3, r4, VCPU_TB_GUEST
	bl	kvmhv_accumulate_time
#endif

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r0, VCPU_PPR(r4)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

BEGIN_FTR_SECTION
	mtspr	SPRN_PPR, r0
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
	ld	r0, VCPU_GPR(R0)(r4)
	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
secondary_too_late:
	cmpdi	r4, 0
	beq	11f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
11:	b	kvmhv_switch_to_host

hdec_soon:
	ld	r4, HSTATE_KVM_VCPU(r13)
	cmpdi	r4, 0
	beq	12f
	addi	r3, r4, VCPU_TB_RMEXIT
	bl	kvmhv_accumulate_time
12:	b	kvmhv_do_exit
#endif

/******************************************************************************
 *                                                                            *
 *                                Exit code                                   *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt_hv
kvmppc_interrupt_hv:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	std	r9, HSTATE_SCRATCH2(r13)

	lbz	r9, HSTATE_IN_GUEST(r13)
	cmpwi	r9, KVM_GUEST_MODE_HOST_HV
	beq	kvmppc_bad_host_intr
#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
	cmpwi	r9, KVM_GUEST_MODE_GUEST
	ld	r9, HSTATE_SCRATCH2(r13)
	beq	kvmppc_interrupt_pr
#endif
	/* We're now back in the host but in guest MMU context */
	li	r9, KVM_GUEST_MODE_HOST_HV
	stb	r9, HSTATE_IN_GUEST(r13)

	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_SCRATCH2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
BEGIN_FTR_SECTION
	ld	r4, HSTATE_PPR(r13)
	std	r4, VCPU_PPR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
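	/* interrupts delivered via HSRR0/1 have bit 1 set in the trap number in r12 */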
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	stw	r12,VCPU_TRAP(r9)

#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMINTR
	mr	r4, r9
	bl	kvmhv_accumulate_time
	ld	r5, VCPU_GPR(R5)(r9)
	ld	r6, VCPU_GPR(R6)(r9)
	ld	r7, VCPU_GPR(R7)(r9)
	ld	r8, VCPU_GPR(R8)(r9)
#endif

	/* Save HEIR (HV emulation assist reg) in emul_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
	stw	r3,VCPU_LAST_INST(r9)
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
11:	stw	r3,VCPU_HEIR(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	mr	r4,r9
	bge	fast_guest_return
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	guest_exit_cont

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	bl	kvmppc_read_intr
	cmpdi	r3, 0
	bgt	guest_exit_cont

	/* Check if any CPU is heading out to the host, if so head out too */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lwz	r0, VCORE_ENTRY_EXIT(r5)
	cmpwi	r0, 0x100
	mr	r4, r9
	blt	deliver_guest_interrupt

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:
#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
	addi	r3, r9, VCPU_TB_RMEXIT
	mr	r4, r9
	bl	kvmhv_accumulate_time
#endif

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)
	/*
	 * Save the guest PURR/SPURR
	 */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4

	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

BEGIN_FTR_SECTION
	b	8f
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
	/* Save POWER8-specific registers */
	mfspr	r5, SPRN_IAMR
	mfspr	r6, SPRN_PSPB
	mfspr	r7, SPRN_FSCR
	std	r5, VCPU_IAMR(r9)
	stw	r6, VCPU_PSPB(r9)
	std	r7, VCPU_FSCR(r9)
	mfspr	r5, SPRN_IC
	mfspr	r6, SPRN_VTB
	mfspr	r7, SPRN_TAR
	std	r5, VCPU_IC(r9)
	std	r6, VCPU_VTB(r9)
	std	r7, VCPU_TAR(r9)
	mfspr	r8, SPRN_EBBHR
	std	r8, VCPU_EBBHR(r9)
	mfspr	r5, SPRN_EBBRR
	mfspr	r6, SPRN_BESCR
	mfspr	r7, SPRN_CSIGR
	mfspr	r8, SPRN_TACR
	std	r5, VCPU_EBBRR(r9)
	std	r6, VCPU_BESCR(r9)
	std	r7, VCPU_CSIGR(r9)
	std	r8, VCPU_TACR(r9)
	mfspr	r5, SPRN_TCSCR
	mfspr	r6, SPRN_ACOP
	mfspr	r7, SPRN_PID
	mfspr	r8, SPRN_WORT
	std	r5, VCPU_TCSCR(r9)
	std	r6, VCPU_ACOP(r9)
	stw	r7, VCPU_GUEST_PID(r9)
	std	r8, VCPU_WORT(r9)
8:

	/* Save and reset AMR and UAMOR before turning on the MMU */
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6

	/* Switch DSCR back to host value */
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)
	mtspr	SPRN_DSCR, r7

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	kvmppc_save_fp
#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
BEGIN_FTR_SECTION
	b	2f
END_FTR_SECTION_IFCLR(CPU_FTR_TM)
	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

	ld	r5, VCPU_MSR(r9)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */

	li	r3, TM_CAUSE_KVM_RESCHED

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29

	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	mfspr	r7, SPRN_TEXASR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)
	std	r7, VCPU_TEXASR(r9)
2:
#endif

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	li	r4, LPPACA_YIELDCOUNT
	LWZX_BE	r3, r8, r4
	addi	r3, r3, 1
	STWX_BE	r3, r8, r4
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
BEGIN_FTR_SECTION
	/*
	 * POWER8 seems to have a hardware bug where setting
	 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
	 * when some counters are already negative doesn't seem
	 * to cause a performance monitor alert (and hence interrupt).
	 * The effect of this is that when saving the PMU state,
	 * if there is no PMU alert pending when we read MMCR0
	 * before freezing the counters, but one becomes pending
	 * before we read the counters, we lose it.
	 * To work around this, we need a way to freeze the counters
	 * before reading MMCR0.  Normally, freezing the counters
	 * is done by writing MMCR0 (to set MMCR0[FC]) which
	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
	 * we can also freeze the counters using MMCR2, by writing
	 * 1s to all the counter freeze condition bits (there are
	 * 9 bits each for 6 counters).
	 */
	li	r3, -1			/* set all freeze bits */
	clrrdi	r3, r3, 10
	mfspr	r10, SPRN_MMCR2
	mtspr	SPRN_MMCR2, r3
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
	/* Clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	mfspr	r7, SPRN_SIAR
	mfspr	r8, SPRN_SDAR
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
BEGIN_FTR_SECTION
	std	r10, VCPU_MMCR + 24(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
	std	r7, VCPU_SIAR(r9)
	std	r8, VCPU_SDAR(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	mfspr	r5, SPRN_SIER
	mfspr	r6, SPRN_SPMC1
	mfspr	r7, SPRN_SPMC2
	mfspr	r8, SPRN_MMCRS
	std	r5, VCPU_SIER(r9)
	stw	r6, VCPU_PMC + 24(r9)
	stw	r7, VCPU_PMC + 28(r9)
	std	r8, VCPU_MMCR + 32(r9)
	lis	r4, 0x8000
	mtspr	SPRN_MMCRS, r4
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
22:
	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

#ifndef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
hdec_soon:
#endif
kvmhv_do_exit:			/* r12 = trap, r13 = paca */
	/*
	 * POWER7/POWER8 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Set our bit in the threads-exiting-guest map in the 0xff00
	   bits of vcore->entry_exit_map */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r4, HSTATE_PTID(r13)
	li	r7, 0x100
	sld	r7, r7, r4
	addi	r6, r5, VCORE_ENTRY_EXIT
41:	lwarx	r3, 0, r6
	or	r0, r3, r7
	stwcx.	r0, 0, r6
	bne	41b
	isync		/* order stwcx. vs. reading napping_threads */

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we send a message or
	 * IPI to all the threads that have their bit set in the entry
	 * map in vcore->entry_exit_map (other than ourselves).
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	43f
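	/* convert our exit-map bit back to our entry-map bit so we can exclude ourselves */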
	srwi	r0,r7,8
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	/* Order entry/exit update vs. IPIs */
	sync
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b

#ifndef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
secondary_too_late:
#endif
kvmhv_switch_to_host:
	/* Secondary threads wait for primary to do partition switch */
43:	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_KVM(r5)	/* pointer to struct kvm */
	lbz	r3,HSTATE_PTID(r13)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
154715: lwz r3,VCORE_ENTRY_EXIT(r5)
1548 srwi r0,r3,8
1549 clrldi r3,r3,56
1550 cmpw r3,r0
1551 bne 15b
1552 isync
1553
1554 /* Primary thread switches back to host partition */
1555 ld r6,KVM_HOST_SDR1(r4)
1556 lwz r7,KVM_HOST_LPID(r4)
1557 li r8,LPID_RSVD /* switch to reserved LPID */
1558 mtspr SPRN_LPID,r8
1559 ptesync
1560 mtspr SPRN_SDR1,r6 /* switch to partition page table */
1561 mtspr SPRN_LPID,r7
1562 isync
1563
Michael Neulingb005255e2014-01-08 21:25:21 +11001564BEGIN_FTR_SECTION
1565 /* DPDES is shared between threads */
1566 mfspr r7, SPRN_DPDES
1567 std r7, VCORE_DPDES(r5)
1568 /* clear DPDES so we don't get guest doorbells in the host */
1569 li r8, 0
1570 mtspr SPRN_DPDES, r8
1571END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1572
Paul Mackerrasde56a942011-06-29 00:21:34 +00001573 /* Subtract timebase offset from timebase */
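	/*
	 * Note: the sequence below only rewrites the upper 40 bits of the
	 * timebase (via TBU40); the low 24 bits keep ticking.  If the low
	 * 24 bits wrap between reading the timebase and writing TBU40, the
	 * value just written is stale by one, so the low 24 bits are
	 * compared before and after and, if they went backwards, 1 is
	 * added to the upper 40 bits (addis with 0x100 is +1 at bit 24)
	 * and TBU40 is written again.
	 */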
1574 ld r8,VCORE_TB_OFFSET(r5)
1575 cmpdi r8,0
1576 beq 17f
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11001577 mftb r6 /* current guest timebase */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001578 subf r8,r8,r6
1579 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1580 mftb r7 /* check if lower 24 bits overflowed */
1581 clrldi r6,r6,40
1582 clrldi r7,r7,40
1583 cmpld r7,r6
1584 bge 17f
1585 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1586 mtspr SPRN_TBU40,r8
1587
1588 /* Reset PCR */
158917: ld r0, VCORE_PCR(r5)
1590 cmpdi r0, 0
1591 beq 18f
1592 li r0, 0
1593 mtspr SPRN_PCR, r0
159418:
1595 /* Signal secondary CPUs to continue */
1596 stb r0,VCORE_IN_GUEST(r5)
1597 lis r8,0x7fff /* MAX_INT@h */
1598 mtspr SPRN_HDEC,r8
1599
160016: ld r8,KVM_HOST_LPCR(r4)
1601 mtspr SPRN_LPCR,r8
1602 isync
Paul Mackerrasde56a942011-06-29 00:21:34 +00001603
1604 /* load host SLB entries */
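	/*
	 * Reinstall the SLB_NUM_BOLTED bolted host SLB entries from the
	 * SLB shadow buffer: each 16-byte save area holds an esid/vsid
	 * pair (loaded big-endian via LDX_BE), and an entry is written
	 * with slbmte only if its valid bit (SLB_ESID_V) is set.
	 */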
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001605 ld r8,PACA_SLBSHADOWPTR(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001606
1607 .rept SLB_NUM_BOLTED
Alexander Graf0865a582014-06-11 10:36:17 +02001608 li r3, SLBSHADOW_SAVEAREA
1609 LDX_BE r5, r8, r3
1610 addi r3, r3, 8
1611 LDX_BE r6, r8, r3
Paul Mackerrasde56a942011-06-29 00:21:34 +00001612 andis. r7,r5,SLB_ESID_V@h
1613 beq 1f
1614 slbmte r6,r5
16151: addi r8,r8,16
1616 .endr
1617
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001618#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1619 /* Finish timing, if we have a vcpu */
1620 ld r4, HSTATE_KVM_VCPU(r13)
1621 cmpdi r4, 0
1622 li r3, 0
1623 beq 2f
1624 bl kvmhv_accumulate_time
16252:
1626#endif
Paul Mackerrasde56a942011-06-29 00:21:34 +00001627 /* Unset guest mode */
1628 li r0, KVM_GUEST_MODE_NONE
1629 stb r0, HSTATE_IN_GUEST(r13)
1630
Paul Mackerras218309b2013-09-06 13:23:44 +10001631 ld r0, 112+PPC_LR_STKOFF(r1)
1632 addi r1, r1, 112
1633 mtlr r0
1634 blr
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001635
Paul Mackerras697d3892011-12-12 12:36:37 +00001636/*
1637 * Check whether an HDSI is an HPTE not found fault or something else.
1638 * If it is an HPTE not found fault that is due to the guest accessing
1639 * a page that they have mapped but which we have paged out, then
1640 * we continue on with the guest exit path. In all other cases,
1641 * reflect the HDSI to the guest as a DSI.
1642 */
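/*
 * As used below, the value returned by kvmppc_hpte_hv_fault (in r3) is:
 * 0 to retry the faulting instruction, -1 to exit to the host kernel,
 * -2 when MMIO emulation is needed (the instruction word is then fetched),
 * and any other value is the DSISR to use when synthesizing the DSI.
 */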
1643kvmppc_hdsi:
1644 mfspr r4, SPRN_HDAR
1645 mfspr r6, SPRN_HDSISR
Paul Mackerras4cf302b2011-12-12 12:38:51 +00001646 /* HPTE not found fault or protection fault? */
1647 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
Paul Mackerras697d3892011-12-12 12:36:37 +00001648 beq 1f /* if not, send it to the guest */
1649 andi. r0, r11, MSR_DR /* data relocation enabled? */
1650 beq 3f
1651 clrrdi r0, r4, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00001652 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerras697d3892011-12-12 12:36:37 +00001653 bne 1f /* if no SLB entry found */
16544: std r4, VCPU_FAULT_DAR(r9)
1655 stw r6, VCPU_FAULT_DSISR(r9)
1656
1657 /* Search the hash table. */
1658 mr r3, r9 /* vcpu pointer */
Paul Mackerras342d3db2011-12-12 12:38:05 +00001659 li r7, 1 /* data fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001660 bl kvmppc_hpte_hv_fault
Paul Mackerras697d3892011-12-12 12:36:37 +00001661 ld r9, HSTATE_KVM_VCPU(r13)
1662 ld r10, VCPU_PC(r9)
1663 ld r11, VCPU_MSR(r9)
1664 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1665 cmpdi r3, 0 /* retry the instruction */
1666 beq 6f
1667 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001668 beq guest_exit_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00001669 cmpdi r3, -2 /* MMIO emulation; need instr word */
1670 beq 2f
1671
1672 /* Synthesize a DSI for the guest */
1673 ld r4, VCPU_FAULT_DAR(r9)
1674 mr r6, r3
16751: mtspr SPRN_DAR, r4
1676 mtspr SPRN_DSISR, r6
1677 mtspr SPRN_SRR0, r10
1678 mtspr SPRN_SRR1, r11
1679 li r10, BOOK3S_INTERRUPT_DATA_STORAGE
Michael Neulinge4e38122014-03-25 10:47:02 +11001680 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001681fast_interrupt_c_return:
Paul Mackerras697d3892011-12-12 12:36:37 +000016826: ld r7, VCPU_CTR(r9)
1683 lwz r8, VCPU_XER(r9)
1684 mtctr r7
1685 mtxer r8
1686 mr r4, r9
1687 b fast_guest_return
1688
16893: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
1690 ld r5, KVM_VRMA_SLB_V(r5)
1691 b 4b
1692
1693 /* If this is for emulated MMIO, load the instruction word */
16942: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
1695
1696 /* Set guest mode to 'jump over instruction' so if lwz faults
1697 * we'll just continue at the next IP. */
1698 li r0, KVM_GUEST_MODE_SKIP
1699 stb r0, HSTATE_IN_GUEST(r13)
1700
1701 /* Do the access with MSR:DR enabled */
1702 mfmsr r3
1703 ori r4, r3, MSR_DR /* Enable paging for data */
1704 mtmsrd r4
1705 lwz r8, 0(r10)
1706 mtmsrd r3
1707
1708 /* Store the result */
1709 stw r8, VCPU_LAST_INST(r9)
1710
1711 /* Unset guest mode. */
Paul Mackerras44a3add2013-10-04 21:45:04 +10001712 li r0, KVM_GUEST_MODE_HOST_HV
Paul Mackerras697d3892011-12-12 12:36:37 +00001713 stb r0, HSTATE_IN_GUEST(r13)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001714 b guest_exit_cont
Paul Mackerrasde56a942011-06-29 00:21:34 +00001715
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001716/*
Paul Mackerras342d3db2011-12-12 12:38:05 +00001717 * Similarly for an HISI, reflect it to the guest as an ISI unless
1718 * it is an HPTE not found fault for a page that we have paged out.
1719 */
1720kvmppc_hisi:
1721 andis. r0, r11, SRR1_ISI_NOPT@h
1722 beq 1f
1723 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
1724 beq 3f
1725 clrrdi r0, r10, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00001726 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerras342d3db2011-12-12 12:38:05 +00001727 bne 1f /* if no SLB entry found */
17284:
1729 /* Search the hash table. */
1730 mr r3, r9 /* vcpu pointer */
1731 mr r4, r10
1732 mr r6, r11
1733 li r7, 0 /* instruction fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001734 bl kvmppc_hpte_hv_fault
Paul Mackerras342d3db2011-12-12 12:38:05 +00001735 ld r9, HSTATE_KVM_VCPU(r13)
1736 ld r10, VCPU_PC(r9)
1737 ld r11, VCPU_MSR(r9)
1738 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1739 cmpdi r3, 0 /* retry the instruction */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001740 beq fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00001741 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001742 beq guest_exit_cont
Paul Mackerras342d3db2011-12-12 12:38:05 +00001743
1744 /* Synthesize an ISI for the guest */
1745 mr r11, r3
17461: mtspr SPRN_SRR0, r10
1747 mtspr SPRN_SRR1, r11
1748 li r10, BOOK3S_INTERRUPT_INST_STORAGE
Michael Neulinge4e38122014-03-25 10:47:02 +11001749 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001750 b fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00001751
17523: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
1753 ld r5, KVM_VRMA_SLB_V(r6)
1754 b 4b
1755
1756/*
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001757 * Try to handle an hcall in real mode.
1758 * Returns to the guest if we handle it, or continues on up to
1759 * the kernel if we can't (i.e. if we don't have a handler for
1760 * it, or if the handler returns H_TOO_HARD).
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001761 *
1762 * r5 - r8 contain hcall args,
1763 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001764 */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001765hcall_try_real_mode:
Michael Neulingc75df6f2012-06-25 13:33:10 +00001766 ld r3,VCPU_GPR(R3)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001767 andi. r0,r11,MSR_PR
Liu Ping Fan27025a62013-11-19 14:12:48 +08001768 /* sc 1 from userspace - reflect to guest syscall */
1769 bne sc_1_fast_return
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001770 clrrdi r3,r3,2
1771 cmpldi r3,hcall_real_table_end - hcall_real_table
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001772 bge guest_exit_cont
Paul Mackerras699a0ea2014-06-02 11:02:59 +10001773 /* See if this hcall is enabled for in-kernel handling */
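	/*
	 * Roughly, this test is
	 *	if (!test_bit(hcall_nr / 4, kvm->arch.enabled_hcalls))
	 *		exit to the host;
	 * with hcall_nr (a multiple of 4) in r3.  The code below computes
	 * the doubleword index ((hcall_nr / 4) >> 6) and bit number
	 * ((hcall_nr / 4) & 0x3f) by hand.  (A sketch; the field name is
	 * inferred from the KVM_ENABLED_HCALLS offset used below.)
	 */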
1774 ld r4, VCPU_KVM(r9)
1775 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
1776 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
1777 add r4, r4, r0
1778 ld r0, KVM_ENABLED_HCALLS(r4)
1779 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
1780 srd r0, r0, r4
1781 andi. r0, r0, 1
1782 beq guest_exit_cont
1783 /* Get pointer to handler, if any, and call it */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001784 LOAD_REG_ADDR(r4, hcall_real_table)
Paul Mackerras4baa1d82013-07-08 20:09:53 +10001785 lwax r3,r3,r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001786 cmpwi r3,0
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001787 beq guest_exit_cont
Anton Blanchard05a308c2014-06-12 18:16:10 +10001788 add r12,r3,r4
1789 mtctr r12
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001790 mr r3,r9 /* get vcpu pointer */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001791 ld r4,VCPU_GPR(R4)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001792 bctrl
1793 cmpdi r3,H_TOO_HARD
1794 beq hcall_real_fallback
1795 ld r4,HSTATE_KVM_VCPU(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001796 std r3,VCPU_GPR(R3)(r4)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001797 ld r10,VCPU_PC(r4)
1798 ld r11,VCPU_MSR(r4)
1799 b fast_guest_return
1800
Liu Ping Fan27025a62013-11-19 14:12:48 +08001801sc_1_fast_return:
1802 mtspr SPRN_SRR0,r10
1803 mtspr SPRN_SRR1,r11
1804 li r10, BOOK3S_INTERRUPT_SYSCALL
Michael Neulinge4e38122014-03-25 10:47:02 +11001805 bl kvmppc_msr_interrupt
Liu Ping Fan27025a62013-11-19 14:12:48 +08001806 mr r4,r9
1807 b fast_guest_return
1808
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001809	/* We've attempted a real mode hcall, but the handler has punted it back
1810 * to userspace. We need to restore some clobbered volatiles
1811 * before resuming the pass-it-to-qemu path */
1812hcall_real_fallback:
1813 li r12,BOOK3S_INTERRUPT_SYSCALL
1814 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001815
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001816 b guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001817
1818 .globl hcall_real_table
1819hcall_real_table:
1820 .long 0 /* 0 - unused */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001821 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
1822 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
1823 .long DOTSYM(kvmppc_h_read) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001824 .long 0 /* 0x10 - H_CLEAR_MOD */
1825 .long 0 /* 0x14 - H_CLEAR_REF */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001826 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
1827 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
1828 .long DOTSYM(kvmppc_h_put_tce) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001829 .long 0 /* 0x24 - H_SET_SPRG0 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001830 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001831 .long 0 /* 0x2c */
1832 .long 0 /* 0x30 */
1833 .long 0 /* 0x34 */
1834 .long 0 /* 0x38 */
1835 .long 0 /* 0x3c */
1836 .long 0 /* 0x40 */
1837 .long 0 /* 0x44 */
1838 .long 0 /* 0x48 */
1839 .long 0 /* 0x4c */
1840 .long 0 /* 0x50 */
1841 .long 0 /* 0x54 */
1842 .long 0 /* 0x58 */
1843 .long 0 /* 0x5c */
1844 .long 0 /* 0x60 */
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00001845#ifdef CONFIG_KVM_XICS
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001846 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
1847 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
1848 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00001849 .long 0 /* 0x70 - H_IPOLL */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001850 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00001851#else
1852 .long 0 /* 0x64 - H_EOI */
1853 .long 0 /* 0x68 - H_CPPR */
1854 .long 0 /* 0x6c - H_IPI */
1855 .long 0 /* 0x70 - H_IPOLL */
1856 .long 0 /* 0x74 - H_XIRR */
1857#endif
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001858 .long 0 /* 0x78 */
1859 .long 0 /* 0x7c */
1860 .long 0 /* 0x80 */
1861 .long 0 /* 0x84 */
1862 .long 0 /* 0x88 */
1863 .long 0 /* 0x8c */
1864 .long 0 /* 0x90 */
1865 .long 0 /* 0x94 */
1866 .long 0 /* 0x98 */
1867 .long 0 /* 0x9c */
1868 .long 0 /* 0xa0 */
1869 .long 0 /* 0xa4 */
1870 .long 0 /* 0xa8 */
1871 .long 0 /* 0xac */
1872 .long 0 /* 0xb0 */
1873 .long 0 /* 0xb4 */
1874 .long 0 /* 0xb8 */
1875 .long 0 /* 0xbc */
1876 .long 0 /* 0xc0 */
1877 .long 0 /* 0xc4 */
1878 .long 0 /* 0xc8 */
1879 .long 0 /* 0xcc */
1880 .long 0 /* 0xd0 */
1881 .long 0 /* 0xd4 */
1882 .long 0 /* 0xd8 */
1883 .long 0 /* 0xdc */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001884 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
Sam Bobroff90fd09f2014-12-03 13:30:40 +11001885 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001886 .long 0 /* 0xe8 */
1887 .long 0 /* 0xec */
1888 .long 0 /* 0xf0 */
1889 .long 0 /* 0xf4 */
1890 .long 0 /* 0xf8 */
1891 .long 0 /* 0xfc */
1892 .long 0 /* 0x100 */
1893 .long 0 /* 0x104 */
1894 .long 0 /* 0x108 */
1895 .long 0 /* 0x10c */
1896 .long 0 /* 0x110 */
1897 .long 0 /* 0x114 */
1898 .long 0 /* 0x118 */
1899 .long 0 /* 0x11c */
1900 .long 0 /* 0x120 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001901 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
Paul Mackerras8563bf52014-01-08 21:25:29 +11001902 .long 0 /* 0x128 */
1903 .long 0 /* 0x12c */
1904 .long 0 /* 0x130 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001905 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
Michael Ellermane928e9c2015-03-20 20:39:41 +11001906 .long 0 /* 0x138 */
1907 .long 0 /* 0x13c */
1908 .long 0 /* 0x140 */
1909 .long 0 /* 0x144 */
1910 .long 0 /* 0x148 */
1911 .long 0 /* 0x14c */
1912 .long 0 /* 0x150 */
1913 .long 0 /* 0x154 */
1914 .long 0 /* 0x158 */
1915 .long 0 /* 0x15c */
1916 .long 0 /* 0x160 */
1917 .long 0 /* 0x164 */
1918 .long 0 /* 0x168 */
1919 .long 0 /* 0x16c */
1920 .long 0 /* 0x170 */
1921 .long 0 /* 0x174 */
1922 .long 0 /* 0x178 */
1923 .long 0 /* 0x17c */
1924 .long 0 /* 0x180 */
1925 .long 0 /* 0x184 */
1926 .long 0 /* 0x188 */
1927 .long 0 /* 0x18c */
1928 .long 0 /* 0x190 */
1929 .long 0 /* 0x194 */
1930 .long 0 /* 0x198 */
1931 .long 0 /* 0x19c */
1932 .long 0 /* 0x1a0 */
1933 .long 0 /* 0x1a4 */
1934 .long 0 /* 0x1a8 */
1935 .long 0 /* 0x1ac */
1936 .long 0 /* 0x1b0 */
1937 .long 0 /* 0x1b4 */
1938 .long 0 /* 0x1b8 */
1939 .long 0 /* 0x1bc */
1940 .long 0 /* 0x1c0 */
1941 .long 0 /* 0x1c4 */
1942 .long 0 /* 0x1c8 */
1943 .long 0 /* 0x1cc */
1944 .long 0 /* 0x1d0 */
1945 .long 0 /* 0x1d4 */
1946 .long 0 /* 0x1d8 */
1947 .long 0 /* 0x1dc */
1948 .long 0 /* 0x1e0 */
1949 .long 0 /* 0x1e4 */
1950 .long 0 /* 0x1e8 */
1951 .long 0 /* 0x1ec */
1952 .long 0 /* 0x1f0 */
1953 .long 0 /* 0x1f4 */
1954 .long 0 /* 0x1f8 */
1955 .long 0 /* 0x1fc */
1956 .long 0 /* 0x200 */
1957 .long 0 /* 0x204 */
1958 .long 0 /* 0x208 */
1959 .long 0 /* 0x20c */
1960 .long 0 /* 0x210 */
1961 .long 0 /* 0x214 */
1962 .long 0 /* 0x218 */
1963 .long 0 /* 0x21c */
1964 .long 0 /* 0x220 */
1965 .long 0 /* 0x224 */
1966 .long 0 /* 0x228 */
1967 .long 0 /* 0x22c */
1968 .long 0 /* 0x230 */
1969 .long 0 /* 0x234 */
1970 .long 0 /* 0x238 */
1971 .long 0 /* 0x23c */
1972 .long 0 /* 0x240 */
1973 .long 0 /* 0x244 */
1974 .long 0 /* 0x248 */
1975 .long 0 /* 0x24c */
1976 .long 0 /* 0x250 */
1977 .long 0 /* 0x254 */
1978 .long 0 /* 0x258 */
1979 .long 0 /* 0x25c */
1980 .long 0 /* 0x260 */
1981 .long 0 /* 0x264 */
1982 .long 0 /* 0x268 */
1983 .long 0 /* 0x26c */
1984 .long 0 /* 0x270 */
1985 .long 0 /* 0x274 */
1986 .long 0 /* 0x278 */
1987 .long 0 /* 0x27c */
1988 .long 0 /* 0x280 */
1989 .long 0 /* 0x284 */
1990 .long 0 /* 0x288 */
1991 .long 0 /* 0x28c */
1992 .long 0 /* 0x290 */
1993 .long 0 /* 0x294 */
1994 .long 0 /* 0x298 */
1995 .long 0 /* 0x29c */
1996 .long 0 /* 0x2a0 */
1997 .long 0 /* 0x2a4 */
1998 .long 0 /* 0x2a8 */
1999 .long 0 /* 0x2ac */
2000 .long 0 /* 0x2b0 */
2001 .long 0 /* 0x2b4 */
2002 .long 0 /* 0x2b8 */
2003 .long 0 /* 0x2bc */
2004 .long 0 /* 0x2c0 */
2005 .long 0 /* 0x2c4 */
2006 .long 0 /* 0x2c8 */
2007 .long 0 /* 0x2cc */
2008 .long 0 /* 0x2d0 */
2009 .long 0 /* 0x2d4 */
2010 .long 0 /* 0x2d8 */
2011 .long 0 /* 0x2dc */
2012 .long 0 /* 0x2e0 */
2013 .long 0 /* 0x2e4 */
2014 .long 0 /* 0x2e8 */
2015 .long 0 /* 0x2ec */
2016 .long 0 /* 0x2f0 */
2017 .long 0 /* 0x2f4 */
2018 .long 0 /* 0x2f8 */
2019 .long 0 /* 0x2fc */
2020 .long DOTSYM(kvmppc_h_random) - hcall_real_table
Paul Mackerrasae2113a2014-06-02 11:03:00 +10002021 .globl hcall_real_table_end
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002022hcall_real_table_end:
2023
Paul Mackerras8563bf52014-01-08 21:25:29 +11002024_GLOBAL(kvmppc_h_set_xdabr)
2025 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2026 beq 6f
2027 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2028 andc. r0, r5, r0
2029 beq 3f
20306: li r3, H_PARAMETER
2031 blr
2032
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002033_GLOBAL(kvmppc_h_set_dabr)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002034 li r5, DABRX_USER | DABRX_KERNEL
20353:
Michael Neulingeee7ff92014-01-08 21:25:19 +11002036BEGIN_FTR_SECTION
2037 b 2f
2038END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002039 std r4,VCPU_DABR(r3)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002040 stw r5, VCPU_DABRX(r3)
2041 mtspr SPRN_DABRX, r5
Paul Mackerras89436332012-03-02 01:38:23 +00002042 /* Work around P7 bug where DABR can get corrupted on mtspr */
20431: mtspr SPRN_DABR,r4
2044 mfspr r5, SPRN_DABR
2045 cmpd r4, r5
2046 bne 1b
2047 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002048 li r3,0
2049 blr
2050
Paul Mackerras8563bf52014-01-08 21:25:29 +11002051 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
20522: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
2053 rlwimi r5, r4, 1, DAWRX_WT
2054 clrrdi r4, r4, 3
2055 std r4, VCPU_DAWR(r3)
2056 std r5, VCPU_DAWRX(r3)
2057 mtspr SPRN_DAWR, r4
2058 mtspr SPRN_DAWRX, r5
2059 li r3, 0
Paul Mackerrasde56a942011-06-29 00:21:34 +00002060 blr
2061
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002062_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002063 ori r11,r11,MSR_EE
2064 std r11,VCPU_MSR(r3)
2065 li r0,1
2066 stb r0,VCPU_CEDED(r3)
2067 sync /* order setting ceded vs. testing prodded */
2068 lbz r5,VCPU_PRODDED(r3)
2069 cmpwi r5,0
Paul Mackerras04f995a2012-08-06 00:03:28 +00002070 bne kvm_cede_prodded
Paul Mackerras19ccb762011-07-23 17:42:46 +10002071 li r0,0 /* set trap to 0 to say hcall is handled */
2072 stw r0,VCPU_TRAP(r3)
2073 li r0,H_SUCCESS
Michael Neulingc75df6f2012-06-25 13:33:10 +00002074 std r0,VCPU_GPR(R3)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002075
2076 /*
2077 * Set our bit in the bitmask of napping threads unless all the
2078 * other threads are already napping, in which case we send this
2079 * up to the host.
2080 */
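	/*
	 * In rough C terms, the lwarx/stwcx. loop below is
	 *	do {
	 *		new = vcore->napping_threads | (1 << ptid);
	 *		if (new == entry_map)
	 *			goto kvm_cede_exit;	(everyone has ceded)
	 *	} while (the conditional store of new fails);
	 * where entry_map is the low byte of vcore->entry_exit_map,
	 * read into r8 below.  A sketch only, not literal C.
	 */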
2081 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002082 lbz r6,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002083 lwz r8,VCORE_ENTRY_EXIT(r5)
2084 clrldi r8,r8,56
2085 li r0,1
2086 sld r0,r0,r6
2087 addi r6,r5,VCORE_NAPPING_THREADS
208831: lwarx r4,0,r6
2089 or r4,r4,r0
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002090 cmpw r4,r8
2091 beq kvm_cede_exit
Paul Mackerras19ccb762011-07-23 17:42:46 +10002092 stwcx. r4,0,r6
2093 bne 31b
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002094 /* order napping_threads update vs testing entry_exit_map */
Paul Mackerrasf019b7a2013-11-16 17:46:03 +11002095 isync
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002096 li r0,NAPPING_CEDE
Paul Mackerras19ccb762011-07-23 17:42:46 +10002097 stb r0,HSTATE_NAPPING(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002098 lwz r7,VCORE_ENTRY_EXIT(r5)
2099 cmpwi r7,0x100
2100 bge 33f /* another thread already exiting */
2101
2102/*
2103 * Although not specifically required by the architecture, POWER7
2104 * preserves the following registers in nap mode, even if an SMT mode
2105 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2106 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2107 */
2108 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002109 std r14, VCPU_GPR(R14)(r3)
2110 std r15, VCPU_GPR(R15)(r3)
2111 std r16, VCPU_GPR(R16)(r3)
2112 std r17, VCPU_GPR(R17)(r3)
2113 std r18, VCPU_GPR(R18)(r3)
2114 std r19, VCPU_GPR(R19)(r3)
2115 std r20, VCPU_GPR(R20)(r3)
2116 std r21, VCPU_GPR(R21)(r3)
2117 std r22, VCPU_GPR(R22)(r3)
2118 std r23, VCPU_GPR(R23)(r3)
2119 std r24, VCPU_GPR(R24)(r3)
2120 std r25, VCPU_GPR(R25)(r3)
2121 std r26, VCPU_GPR(R26)(r3)
2122 std r27, VCPU_GPR(R27)(r3)
2123 std r28, VCPU_GPR(R28)(r3)
2124 std r29, VCPU_GPR(R29)(r3)
2125 std r30, VCPU_GPR(R30)(r3)
2126 std r31, VCPU_GPR(R31)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002127
2128 /* save FP state */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002129 bl kvmppc_save_fp
Paul Mackerras19ccb762011-07-23 17:42:46 +10002130
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002131 /*
2132 * Set DEC to the smaller of DEC and HDEC, so that we wake
2133 * no later than the end of our timeslice (HDEC interrupts
2134 * don't wake us from nap).
2135 */
2136 mfspr r3, SPRN_DEC
2137 mfspr r4, SPRN_HDEC
2138 mftb r5
2139 cmpw r3, r4
2140 ble 67f
2141 mtspr SPRN_DEC, r4
214267:
2143 /* save expiry time of guest decrementer */
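	/*
	 * The expiry saved below is
	 *	sign-extended DEC + current (guest) timebase - vcore->tb_offset
	 * i.e. the guest decrementer expiry converted to a host timebase
	 * value, so it can be converted back when the guest decrementer
	 * is reloaded (see kvm_end_cede).
	 */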
2144 extsw r3, r3
2145 add r3, r3, r5
2146 ld r4, HSTATE_KVM_VCPU(r13)
2147 ld r5, HSTATE_KVM_VCORE(r13)
2148 ld r6, VCORE_TB_OFFSET(r5)
2149 subf r3, r6, r3 /* convert to host TB value */
2150 std r3, VCPU_DEC_EXPIRES(r4)
2151
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002152#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2153 ld r4, HSTATE_KVM_VCPU(r13)
2154 addi r3, r4, VCPU_TB_CEDE
2155 bl kvmhv_accumulate_time
2156#endif
2157
Paul Mackerrasccc07772015-03-28 14:21:07 +11002158 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2159
Paul Mackerras19ccb762011-07-23 17:42:46 +10002160 /*
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002161	 * Take a nap until a decrementer or external or doorbell interrupt
Paul Mackerrasccc07772015-03-28 14:21:07 +11002162 * occurs, with PECE1 and PECE0 set in LPCR.
2163 * On POWER8, if we are ceding, also set PECEDP.
2164 * Also clear the runlatch bit before napping.
Paul Mackerras19ccb762011-07-23 17:42:46 +10002165 */
Paul Mackerras56548fc2014-12-03 14:48:40 +11002166kvm_do_nap:
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002167 mfspr r0, SPRN_CTRLF
2168 clrrdi r0, r0, 1
2169 mtspr SPRN_CTRLT, r0
Preeti U Murthy582b9102014-04-11 16:02:08 +05302170
Paul Mackerrasf0888f72012-02-03 00:54:17 +00002171 li r0,1
2172 stb r0,HSTATE_HWTHREAD_REQ(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002173 mfspr r5,SPRN_LPCR
2174 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002175BEGIN_FTR_SECTION
Paul Mackerrasccc07772015-03-28 14:21:07 +11002176 rlwimi r5, r3, 0, LPCR_PECEDP
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002177END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002178 mtspr SPRN_LPCR,r5
2179 isync
2180 li r0, 0
2181 std r0, HSTATE_SCRATCH0(r13)
2182 ptesync
2183 ld r0, HSTATE_SCRATCH0(r13)
21841: cmpd r0, r0
2185 bne 1b
2186 nap
2187 b .
2188
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100218933: mr r4, r3
2190 li r3, 0
2191 li r12, 0
2192 b 34f
2193
Paul Mackerras19ccb762011-07-23 17:42:46 +10002194kvm_end_cede:
Paul Mackerras4619ac82013-04-17 20:31:41 +00002195 /* get vcpu pointer */
2196 ld r4, HSTATE_KVM_VCPU(r13)
2197
Paul Mackerras19ccb762011-07-23 17:42:46 +10002198 /* Woken by external or decrementer interrupt */
2199 ld r1, HSTATE_HOST_R1(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002200
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002201#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2202 addi r3, r4, VCPU_TB_RMINTR
2203 bl kvmhv_accumulate_time
2204#endif
2205
Paul Mackerras19ccb762011-07-23 17:42:46 +10002206 /* load up FP state */
2207 bl kvmppc_load_fp
2208
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002209 /* Restore guest decrementer */
2210 ld r3, VCPU_DEC_EXPIRES(r4)
2211 ld r5, HSTATE_KVM_VCORE(r13)
2212 ld r6, VCORE_TB_OFFSET(r5)
2213 add r3, r3, r6 /* convert host TB to guest TB value */
2214 mftb r7
2215 subf r3, r7, r3
2216 mtspr SPRN_DEC, r3
2217
Paul Mackerras19ccb762011-07-23 17:42:46 +10002218 /* Load NV GPRS */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002219 ld r14, VCPU_GPR(R14)(r4)
2220 ld r15, VCPU_GPR(R15)(r4)
2221 ld r16, VCPU_GPR(R16)(r4)
2222 ld r17, VCPU_GPR(R17)(r4)
2223 ld r18, VCPU_GPR(R18)(r4)
2224 ld r19, VCPU_GPR(R19)(r4)
2225 ld r20, VCPU_GPR(R20)(r4)
2226 ld r21, VCPU_GPR(R21)(r4)
2227 ld r22, VCPU_GPR(R22)(r4)
2228 ld r23, VCPU_GPR(R23)(r4)
2229 ld r24, VCPU_GPR(R24)(r4)
2230 ld r25, VCPU_GPR(R25)(r4)
2231 ld r26, VCPU_GPR(R26)(r4)
2232 ld r27, VCPU_GPR(R27)(r4)
2233 ld r28, VCPU_GPR(R28)(r4)
2234 ld r29, VCPU_GPR(R29)(r4)
2235 ld r30, VCPU_GPR(R30)(r4)
2236 ld r31, VCPU_GPR(R31)(r4)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002237
2238 /* Check the wake reason in SRR1 to see why we got here */
2239 bl kvmppc_check_wake_reason
Paul Mackerras19ccb762011-07-23 17:42:46 +10002240
2241 /* clear our bit in vcore->napping_threads */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100224234: ld r5,HSTATE_KVM_VCORE(r13)
2243 lbz r7,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002244 li r0,1
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002245 sld r0,r0,r7
Paul Mackerras19ccb762011-07-23 17:42:46 +10002246 addi r6,r5,VCORE_NAPPING_THREADS
224732: lwarx r7,0,r6
2248 andc r7,r7,r0
2249 stwcx. r7,0,r6
2250 bne 32b
2251 li r0,0
2252 stb r0,HSTATE_NAPPING(r13)
2253
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002254 /* See if the wake reason means we need to exit */
2255 stw r12, VCPU_TRAP(r4)
Paul Mackerras4619ac82013-04-17 20:31:41 +00002256 mr r9, r4
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002257 cmpdi r3, 0
2258 bgt guest_exit_cont
Paul Mackerras4619ac82013-04-17 20:31:41 +00002259
Paul Mackerras19ccb762011-07-23 17:42:46 +10002260 /* see if any other thread is already exiting */
2261 lwz r0,VCORE_ENTRY_EXIT(r5)
2262 cmpwi r0,0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002263 bge guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002264
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002265 b kvmppc_cede_reentry /* if not go back to guest */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002266
2267 /* cede when already previously prodded case */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002268kvm_cede_prodded:
2269 li r0,0
Paul Mackerras19ccb762011-07-23 17:42:46 +10002270 stb r0,VCPU_PRODDED(r3)
2271 sync /* order testing prodded vs. clearing ceded */
2272 stb r0,VCPU_CEDED(r3)
2273 li r3,H_SUCCESS
2274 blr
2275
2276 /* we've ceded but we want to give control to the host */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002277kvm_cede_exit:
Paul Mackerras4619ac82013-04-17 20:31:41 +00002278 b hcall_real_fallback
Paul Mackerras19ccb762011-07-23 17:42:46 +10002279
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002280 /* Try to handle a machine check in real mode */
2281machine_check_realmode:
2282 mr r3, r9 /* get vcpu pointer */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002283 bl kvmppc_realmode_machine_check
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002284 nop
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302285 cmpdi r3, 0 /* Did we handle MCE ? */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002286 ld r9, HSTATE_KVM_VCPU(r13)
2287 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302288 /*
2289	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through
2290	 * a machine check interrupt (set HSRR0 to 0x200).  For handled
2291	 * (non-fatal) errors, just go back to guest execution with the
2292	 * current HSRR0 instead of exiting the guest.  This injects a
2293	 * machine check into the guest for fatal errors, crashing the guest.
2294	 *
2295	 * The old code used to return to the host for unhandled errors,
2296	 * which caused the guest to hang with soft lockups inside the
2297	 * guest and made it difficult to recover the guest instance.
2298 */
2299 ld r10, VCPU_PC(r9)
2300 ld r11, VCPU_MSR(r9)
2301 bne 2f /* Continue guest execution. */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002302 /* If not, deliver a machine check. SRR0/1 are already set */
2303 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
Paul Mackerras000a25d2014-05-26 19:48:41 +10002304 ld r11, VCPU_MSR(r9)
Michael Neulinge4e38122014-03-25 10:47:02 +11002305 bl kvmppc_msr_interrupt
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +053023062: b fast_interrupt_c_return
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002307
Paul Mackerrasde56a942011-06-29 00:21:34 +00002308/*
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002309 * Check the reason we woke from nap, and take appropriate action.
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002310 * Returns (in r3):
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002311 * 0 if nothing needs to be done
2312 * 1 if something happened that needs to be handled by the host
2313 * -1 if there was a guest wakeup (IPI)
2314 *
2315 * Also sets r12 to the interrupt vector for any interrupt that needs
2316 * to be handled now by the host (0x500 for external interrupt), or zero.
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002317 * Modifies r0, r6, r7, r8.
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002318 */
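/*
 * The wake reason is decoded from the field extracted from SRR1 below:
 * 8 = external interrupt (handed to kvmppc_read_intr), 6 = decrementer,
 * 5 = privileged doorbell (POWER8 only), 3 = hypervisor doorbell (POWER8
 * only); anything else is reported to the host by returning 1.
 */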
2319kvmppc_check_wake_reason:
2320 mfspr r6, SPRN_SRR1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002321BEGIN_FTR_SECTION
2322 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2323FTR_SECTION_ELSE
2324 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2325ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
2326 cmpwi r6, 8 /* was it an external interrupt? */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002327 li r12, BOOK3S_INTERRUPT_EXTERNAL
2328 beq kvmppc_read_intr /* if so, see what it was */
2329 li r3, 0
2330 li r12, 0
2331 cmpwi r6, 6 /* was it the decrementer? */
2332 beq 0f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002333BEGIN_FTR_SECTION
2334 cmpwi r6, 5 /* privileged doorbell? */
2335 beq 0f
Paul Mackerras5d00f662014-01-08 21:25:28 +11002336 cmpwi r6, 3 /* hypervisor doorbell? */
2337 beq 3f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002338END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002339 li r3, 1 /* anything else, return 1 */
23400: blr
2341
Paul Mackerras5d00f662014-01-08 21:25:28 +11002342 /* hypervisor doorbell */
23433: li r12, BOOK3S_INTERRUPT_H_DOORBELL
2344 li r3, 1
2345 blr
2346
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002347/*
Paul Mackerrasc9342432013-09-06 13:24:13 +10002348 * Determine what sort of external interrupt is pending (if any).
2349 * Returns:
2350 * 0 if no interrupt is pending
2351 * 1 if an interrupt is pending that needs to be handled by the host
2352 * -1 if there was a guest wakeup IPI (which has now been cleared)
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002353 * Modifies r0, r6, r7, r8, returns value in r3.
Paul Mackerrasc9342432013-09-06 13:24:13 +10002354 */
2355kvmppc_read_intr:
2356 /* see if a host IPI is pending */
2357 li r3, 1
2358 lbz r0, HSTATE_HOST_IPI(r13)
2359 cmpwi r0, 0
2360 bne 1f
Paul Mackerrasde56a942011-06-29 00:21:34 +00002361
Paul Mackerrasc9342432013-09-06 13:24:13 +10002362 /* Now read the interrupt from the ICP */
2363 ld r6, HSTATE_XICS_PHYS(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002364 li r7, XICS_XIRR
Paul Mackerrasc9342432013-09-06 13:24:13 +10002365 cmpdi r6, 0
2366 beq- 1f
2367 lwzcix r0, r6, r7
Alexander Graf76d072f2014-06-11 10:37:52 +02002368 /*
2369 * Save XIRR for later. Since we get in in reverse endian on LE
2370 * systems, save it byte reversed and fetch it back in host endian.
2371 */
2372 li r3, HSTATE_SAVED_XIRR
2373 STWX_BE r0, r3, r13
2374#ifdef __LITTLE_ENDIAN__
2375 lwz r3, HSTATE_SAVED_XIRR(r13)
2376#else
2377 mr r3, r0
2378#endif
2379 rlwinm. r3, r3, 0, 0xffffff
Paul Mackerrasde56a942011-06-29 00:21:34 +00002380 sync
Paul Mackerrasc9342432013-09-06 13:24:13 +10002381 beq 1f /* if nothing pending in the ICP */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002382
Paul Mackerrasc9342432013-09-06 13:24:13 +10002383 /* We found something in the ICP...
2384 *
2385 * If it's not an IPI, stash it in the PACA and return to
2386 * the host, we don't (yet) handle directing real external
2387 * interrupts directly to the guest
2388 */
2389 cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
Paul Mackerrasc9342432013-09-06 13:24:13 +10002390 bne 42f
Paul Mackerrasde56a942011-06-29 00:21:34 +00002391
Paul Mackerrasc9342432013-09-06 13:24:13 +10002392 /* It's an IPI, clear the MFRR and EOI it */
2393 li r3, 0xff
2394 li r8, XICS_MFRR
2395 stbcix r3, r6, r8 /* clear the IPI */
2396 stwcix r0, r6, r7 /* EOI it */
2397 sync
Paul Mackerrasde56a942011-06-29 00:21:34 +00002398
Paul Mackerrasc9342432013-09-06 13:24:13 +10002399 /* We need to re-check host IPI now in case it got set in the
2400 * meantime. If it's clear, we bounce the interrupt to the
2401 * guest
2402 */
2403 lbz r0, HSTATE_HOST_IPI(r13)
2404 cmpwi r0, 0
2405 bne- 43f
2406
2407 /* OK, it's an IPI for us */
2408 li r3, -1
24091: blr
2410
Alexander Graf76d072f2014-06-11 10:37:52 +0200241142: /* It's not an IPI and it's for the host. We saved a copy of XIRR in
2412	 * the PACA earlier; it will be picked up by the host ICP driver
Paul Mackerrasc9342432013-09-06 13:24:13 +10002413 */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002414 li r3, 1
Paul Mackerrasc9342432013-09-06 13:24:13 +10002415 b 1b
2416
241743: /* We raced with the host, we need to resend that IPI, bummer */
2418 li r0, IPI_PRIORITY
2419 stbcix r0, r6, r8 /* set the IPI */
2420 sync
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002421 li r3, 1
Paul Mackerrasc9342432013-09-06 13:24:13 +10002422 b 1b
Paul Mackerrasde56a942011-06-29 00:21:34 +00002423
2424/*
2425 * Save away FP, VMX and VSX registers.
2426 * r3 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002427 * N.B. r30 and r31 are volatile across this function,
2428 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002429 */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002430kvmppc_save_fp:
2431 mflr r30
2432 mr r31,r3
Paul Mackerrasde56a942011-06-29 00:21:34 +00002433 mfmsr r5
2434 ori r8,r5,MSR_FP
2435#ifdef CONFIG_ALTIVEC
2436BEGIN_FTR_SECTION
2437 oris r8,r8,MSR_VEC@h
2438END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2439#endif
2440#ifdef CONFIG_VSX
2441BEGIN_FTR_SECTION
2442 oris r8,r8,MSR_VSX@h
2443END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2444#endif
2445 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11002446 addi r3,r3,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002447 bl store_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002448#ifdef CONFIG_ALTIVEC
2449BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002450 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002451 bl store_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002452END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2453#endif
2454 mfspr r6,SPRN_VRSAVE
Paul Mackerrase724f082014-03-13 20:02:48 +11002455 stw r6,VCPU_VRSAVE(r31)
Paul Mackerras595e4f72013-10-15 20:43:04 +11002456 mtlr r30
Paul Mackerrasde56a942011-06-29 00:21:34 +00002457 blr
2458
2459/*
2460 * Load up FP, VMX and VSX registers
2461 * r4 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002462 * N.B. r30 and r31 are volatile across this function,
2463 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002464 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002465kvmppc_load_fp:
Paul Mackerras595e4f72013-10-15 20:43:04 +11002466 mflr r30
2467 mr r31,r4
Paul Mackerrasde56a942011-06-29 00:21:34 +00002468 mfmsr r9
2469 ori r8,r9,MSR_FP
2470#ifdef CONFIG_ALTIVEC
2471BEGIN_FTR_SECTION
2472 oris r8,r8,MSR_VEC@h
2473END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2474#endif
2475#ifdef CONFIG_VSX
2476BEGIN_FTR_SECTION
2477 oris r8,r8,MSR_VSX@h
2478END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2479#endif
2480 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11002481 addi r3,r4,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002482 bl load_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002483#ifdef CONFIG_ALTIVEC
2484BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002485 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002486 bl load_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002487END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2488#endif
Paul Mackerrase724f082014-03-13 20:02:48 +11002489 lwz r7,VCPU_VRSAVE(r31)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002490 mtspr SPRN_VRSAVE,r7
Paul Mackerras595e4f72013-10-15 20:43:04 +11002491 mtlr r30
2492 mr r4,r31
Paul Mackerrasde56a942011-06-29 00:21:34 +00002493 blr
Paul Mackerras44a3add2013-10-04 21:45:04 +10002494
2495/*
2496 * We come here if we get any exception or interrupt while we are
2497 * executing host real mode code while in guest MMU context.
2498 * For now just spin, but we should do something better.
2499 */
2500kvmppc_bad_host_intr:
2501 b .
Michael Neulinge4e38122014-03-25 10:47:02 +11002502
2503/*
2504 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
2505 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
2506 * r11 has the guest MSR value (in/out)
2507 * r9 has a vcpu pointer (in)
2508 * r0 is used as a scratch register
2509 */
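/*
 * In rough C terms (a sketch, not the kernel's C code):
 *	ts = (guest_msr >> MSR_TS_S_LG) & 3;
 *	if (ts == 2)				(transactional)
 *		ts = 1;				(suspended)
 *	new_msr = the VCPU_INTR_MSR value, with its TS field set to ts;
 * i.e. an interrupt taken while transactional leaves the guest in
 * TM suspended state.
 */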
2510kvmppc_msr_interrupt:
2511 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
2512 cmpwi r0, 2 /* Check if we are in transactional state.. */
2513 ld r11, VCPU_INTR_MSR(r9)
2514 bne 1f
2515 /* ... if transactional, change to suspended */
2516 li r0, 1
25171: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
2518 blr
Paul Mackerras9bc01a92014-05-26 19:48:40 +10002519
2520/*
2521 * This works around a hardware bug on POWER8E processors, where
2522 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
2523 * performance monitor interrupt. Instead, when we need to have
2524 * an interrupt pending, we have to arrange for a counter to overflow.
2525 */
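/*
 * PMC6 is loaded with 0x7fffffff, one below the overflow threshold
 * (bit 31), so the next event it counts overflows it; with MMCR0[PMXE]
 * set, that leaves the desired performance monitor interrupt pending.
 */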
2526kvmppc_fix_pmao:
2527 li r3, 0
2528 mtspr SPRN_MMCR2, r3
2529 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
2530 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
2531 mtspr SPRN_MMCR0, r3
2532 lis r3, 0x7fff
2533 ori r3, r3, 0xffff
2534 mtspr SPRN_PMC6, r3
2535 isync
2536 blr
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002537
2538#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2539/*
2540 * Start timing an activity
2541 * r3 = pointer to time accumulation struct, r4 = vcpu
2542 */
2543kvmhv_start_timing:
2544 ld r5, HSTATE_KVM_VCORE(r13)
2545 lbz r6, VCORE_IN_GUEST(r5)
2546 cmpwi r6, 0
2547 beq 5f /* if in guest, need to */
2548 ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
25495: mftb r5
2550 subf r5, r6, r5
2551 std r3, VCPU_CUR_ACTIVITY(r4)
2552 std r5, VCPU_ACTIVITY_START(r4)
2553 blr
2554
2555/*
2556 * Accumulate time to one activity and start another.
2557 * r3 = pointer to new time accumulation struct, r4 = vcpu
2558 */
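/*
 * Rough C sketch of the update (names are illustrative, following the
 * TAS_* and VCPU_* offsets used below):
 *	now = tb - (in guest ? vcore->tb_offset : 0);
 *	delta = now - activity_start;  switch cur_activity/start to new;
 *	if (old cur_activity) {
 *		seqcount++;  lwsync;
 *		total += delta;  update min and max;
 *		lwsync;  seqcount++;
 *	}
 * The odd/even seqcount with lwsync barriers lets a reader detect a
 * torn update, seqlock-style.
 */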
2559kvmhv_accumulate_time:
2560 ld r5, HSTATE_KVM_VCORE(r13)
2561 lbz r8, VCORE_IN_GUEST(r5)
2562 cmpwi r8, 0
2563 beq 4f /* if in guest, need to */
2564 ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
25654: ld r5, VCPU_CUR_ACTIVITY(r4)
2566 ld r6, VCPU_ACTIVITY_START(r4)
2567 std r3, VCPU_CUR_ACTIVITY(r4)
2568 mftb r7
2569 subf r7, r8, r7
2570 std r7, VCPU_ACTIVITY_START(r4)
2571 cmpdi r5, 0
2572 beqlr
2573 subf r3, r6, r7
2574 ld r8, TAS_SEQCOUNT(r5)
2575 cmpdi r8, 0
2576 addi r8, r8, 1
2577 std r8, TAS_SEQCOUNT(r5)
2578 lwsync
2579 ld r7, TAS_TOTAL(r5)
2580 add r7, r7, r3
2581 std r7, TAS_TOTAL(r5)
2582 ld r6, TAS_MIN(r5)
2583 ld r7, TAS_MAX(r5)
2584 beq 3f
2585 cmpd r3, r6
2586 bge 1f
25873: std r3, TAS_MIN(r5)
25881: cmpd r3, r7
2589 ble 2f
2590 std r3, TAS_MAX(r5)
25912: lwsync
2592 addi r8, r8, 1
2593 std r8, TAS_SEQCOUNT(r5)
2594 blr
2595#endif