/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>
#include <asm/tm.h>

#define VCPU_GPRS_TM(reg)	(((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/* Values in HSTATE_NAPPING(r13) */
#define NAPPING_CEDE	1
#define NAPPING_NOVCPU	2

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL_TOC(kvmppc_hv_entry_trampoline)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)
	stdu	r1, -112(r1)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

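/*
 * A rough C-style sketch of the trampoline above (illustrative only,
 * not generated code): the RFI lands on kvmppc_call_hv_entry with
 * translation turned off, i.e. in hypervisor real mode.
 *
 *	srr0 = kvmppc_call_hv_entry;
 *	srr1 = mfmsr() & ~(MSR_IR | MSR_DR);
 *	clear MSR_RI, then rfi;
 *
 * MSR_RI is cleared first so that an interrupt taken while SRR0/SRR1
 * hold these values is recognised as unrecoverable.
 */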
Paul Mackerras218309b2013-09-06 13:23:44 +100062kvmppc_call_hv_entry:
Paul Mackerrase0b7ec02014-01-08 21:25:20 +110063 ld r4, HSTATE_KVM_VCPU(r13)
Paul Mackerras218309b2013-09-06 13:23:44 +100064 bl kvmppc_hv_entry
65
66 /* Back from guest - restore host state and return to caller */
67
Michael Neulingeee7ff92014-01-08 21:25:19 +110068BEGIN_FTR_SECTION
Paul Mackerras218309b2013-09-06 13:23:44 +100069 /* Restore host DABR and DABRX */
70 ld r5,HSTATE_DABR(r13)
71 li r6,7
72 mtspr SPRN_DABR,r5
73 mtspr SPRN_DABRX,r6
Michael Neulingeee7ff92014-01-08 21:25:19 +110074END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Paul Mackerras218309b2013-09-06 13:23:44 +100075
76 /* Restore SPRG3 */
Scott Wood9d378df2014-03-10 17:29:38 -050077 ld r3,PACA_SPRG_VDSO(r13)
78 mtspr SPRN_SPRG_VDSO_WRITE,r3
Paul Mackerras218309b2013-09-06 13:23:44 +100079
Paul Mackerras218309b2013-09-06 13:23:44 +100080 /* Reload the host's PMU registers */
81 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
82 lbz r4, LPPACA_PMCINUSE(r3)
83 cmpwi r4, 0
84 beq 23f /* skip if not */
Paul Mackerras9bc01a92014-05-26 19:48:40 +100085BEGIN_FTR_SECTION
Michael Ellerman9a4fc4e2014-07-10 19:34:31 +100086 ld r3, HSTATE_MMCR0(r13)
Paul Mackerras9bc01a92014-05-26 19:48:40 +100087 andi. r4, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
88 cmpwi r4, MMCR0_PMAO
89 beql kvmppc_fix_pmao
90END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
Michael Ellerman9a4fc4e2014-07-10 19:34:31 +100091 lwz r3, HSTATE_PMC1(r13)
92 lwz r4, HSTATE_PMC2(r13)
93 lwz r5, HSTATE_PMC3(r13)
94 lwz r6, HSTATE_PMC4(r13)
95 lwz r8, HSTATE_PMC5(r13)
96 lwz r9, HSTATE_PMC6(r13)
Paul Mackerras218309b2013-09-06 13:23:44 +100097 mtspr SPRN_PMC1, r3
98 mtspr SPRN_PMC2, r4
99 mtspr SPRN_PMC3, r5
100 mtspr SPRN_PMC4, r6
101 mtspr SPRN_PMC5, r8
102 mtspr SPRN_PMC6, r9
Michael Ellerman9a4fc4e2014-07-10 19:34:31 +1000103 ld r3, HSTATE_MMCR0(r13)
104 ld r4, HSTATE_MMCR1(r13)
105 ld r5, HSTATE_MMCRA(r13)
106 ld r6, HSTATE_SIAR(r13)
107 ld r7, HSTATE_SDAR(r13)
Paul Mackerras218309b2013-09-06 13:23:44 +1000108 mtspr SPRN_MMCR1, r4
109 mtspr SPRN_MMCRA, r5
Paul Mackerras72cde5a2014-03-25 10:47:08 +1100110 mtspr SPRN_SIAR, r6
111 mtspr SPRN_SDAR, r7
112BEGIN_FTR_SECTION
Michael Ellerman9a4fc4e2014-07-10 19:34:31 +1000113 ld r8, HSTATE_MMCR2(r13)
114 ld r9, HSTATE_SIER(r13)
Paul Mackerras72cde5a2014-03-25 10:47:08 +1100115 mtspr SPRN_MMCR2, r8
116 mtspr SPRN_SIER, r9
117END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerras218309b2013-09-06 13:23:44 +1000118 mtspr SPRN_MMCR0, r3
119 isync
12023:
121
	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

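	/*
	 * C-style sketch of the DEC reload above (illustrative only,
	 * hstate_decexp names the saved host expiry): program the
	 * decrementer with the time remaining until that expiry.
	 *
	 *	mtspr(SPRN_DEC, hstate_decexp - mftb());
	 */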
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000131 /* hwthread_req may have got set by cede or no vcpu, so clear it */
132 li r0, 0
133 stb r0, HSTATE_HWTHREAD_REQ(r13)
134
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100135 /*
Paul Mackerras218309b2013-09-06 13:23:44 +1000136 * For external and machine check interrupts, we need
137 * to call the Linux handler to process the interrupt.
138 * We do that by jumping to absolute address 0x500 for
139 * external interrupts, or the machine_check_fwnmi label
140 * for machine checks (since firmware might have patched
141 * the vector area at 0x200). The [h]rfid at the end of the
142 * handler will return to the book3s_hv_interrupts.S code.
143 * For other interrupts we do the rfid to get back
144 * to the book3s_hv_interrupts.S code here.
145 */
146 ld r8, 112+PPC_LR_STKOFF(r1)
147 addi r1, r1, 112
148 ld r7, HSTATE_HOST_MSR(r13)
149
150 cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
151 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
Paul Mackerras218309b2013-09-06 13:23:44 +1000152 beq 11f
Mahesh Salgaonkar0869b6f2014-07-29 18:40:01 +0530153 cmpwi cr2, r12, BOOK3S_INTERRUPT_HMI
154 beq cr2, 14f /* HMI check */
Paul Mackerras218309b2013-09-06 13:23:44 +1000155
156 /* RFI into the highmem handler, or branch to interrupt handler */
157 mfmsr r6
158 li r0, MSR_RI
159 andc r6, r6, r0
160 mtmsrd r6, 1 /* Clear RI in MSR */
161 mtsrr0 r8
162 mtsrr1 r7
Paul Mackerras218309b2013-09-06 13:23:44 +1000163 beq cr1, 13f /* machine check */
164 RFI
165
166 /* On POWER7, we have external interrupts set to use HSRR0/1 */
16711: mtspr SPRN_HSRR0, r8
168 mtspr SPRN_HSRR1, r7
169 ba 0x500
170
17113: b machine_check_fwnmi
172
Mahesh Salgaonkar0869b6f2014-07-29 18:40:01 +053017314: mtspr SPRN_HSRR0, r8
174 mtspr SPRN_HSRR1, r7
175 b hmi_exception_after_realmode
176
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100177kvmppc_primary_no_guest:
178 /* We handle this much like a ceded vcpu */
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +1100179 /* put the HDEC into the DEC, since HDEC interrupts don't wake us */
180 mfspr r3, SPRN_HDEC
181 mtspr SPRN_DEC, r3
Paul Mackerras6af27c82015-03-28 14:21:10 +1100182 /*
183 * Make sure the primary has finished the MMU switch.
184 * We should never get here on a secondary thread, but
185 * check it for robustness' sake.
186 */
187 ld r5, HSTATE_KVM_VCORE(r13)
18865: lbz r0, VCORE_IN_GUEST(r5)
189 cmpwi r0, 0
190 beq 65b
191 /* Set LPCR. */
192 ld r8,VCORE_LPCR(r5)
193 mtspr SPRN_LPCR,r8
194 isync
	/* set our bit in napping_threads */
	ld	r5, HSTATE_KVM_VCORE(r13)
	lbz	r7, HSTATE_PTID(r13)
	li	r0, 1
	sld	r0, r0, r7
	addi	r6, r5, VCORE_NAPPING_THREADS
1:	lwarx	r3, 0, r6
	or	r3, r3, r0
	stwcx.	r3, 0, r6
	bne	1b
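	/*
	 * The lwarx/stwcx. loop above is an atomic OR of our thread bit
	 * into the vcore's napping_threads word.  A minimal C sketch
	 * (illustrative only, assuming a compiler atomic builtin):
	 *
	 *	__atomic_fetch_or(&vc->napping_threads, 1u << ptid,
	 *			  __ATOMIC_RELAXED);
	 */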
Paul Mackerras7d6c40d2015-03-28 14:21:09 +1100205 /* order napping_threads update vs testing entry_exit_map */
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100206 isync
207 li r12, 0
208 lwz r7, VCORE_ENTRY_EXIT(r5)
209 cmpwi r7, 0x100
210 bge kvm_novcpu_exit /* another thread already exiting */
211 li r3, NAPPING_NOVCPU
212 stb r3, HSTATE_NAPPING(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100213
Paul Mackerrasccc07772015-03-28 14:21:07 +1100214 li r3, 0 /* Don't wake on privileged (OS) doorbell */
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100215 b kvm_do_nap
216
217kvm_novcpu_wakeup:
218 ld r1, HSTATE_HOST_R1(r13)
219 ld r5, HSTATE_KVM_VCORE(r13)
220 li r0, 0
221 stb r0, HSTATE_NAPPING(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100222
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100223 /* check the wake reason */
224 bl kvmppc_check_wake_reason
Paul Mackerras6af27c82015-03-28 14:21:10 +1100225
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100226 /* see if any other thread is already exiting */
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100227 lwz r0, VCORE_ENTRY_EXIT(r5)
228 cmpwi r0, 0x100
229 bge kvm_novcpu_exit
230
231 /* clear our bit in napping_threads */
232 lbz r7, HSTATE_PTID(r13)
233 li r0, 1
234 sld r0, r0, r7
235 addi r6, r5, VCORE_NAPPING_THREADS
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002364: lwarx r7, 0, r6
237 andc r7, r7, r0
238 stwcx. r7, 0, r6
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100239 bne 4b
240
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100241 /* See if the wake reason means we need to exit */
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100242 cmpdi r3, 0
243 bge kvm_novcpu_exit
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100244
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +1100245 /* See if our timeslice has expired (HDEC is negative) */
246 mfspr r0, SPRN_HDEC
247 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
248 cmpwi r0, 0
249 blt kvm_novcpu_exit
250
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100251 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
252 ld r4, HSTATE_KVM_VCPU(r13)
253 cmpdi r4, 0
Paul Mackerrasb6c295d2015-03-28 14:21:02 +1100254 beq kvmppc_primary_no_guest
255
256#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
257 addi r3, r4, VCPU_TB_RMENTRY
258 bl kvmhv_start_timing
259#endif
260 b kvmppc_got_guest
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100261
262kvm_novcpu_exit:
Paul Mackerras6af27c82015-03-28 14:21:10 +1100263#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
264 ld r4, HSTATE_KVM_VCPU(r13)
265 cmpdi r4, 0
266 beq 13f
267 addi r3, r4, VCPU_TB_RMEXIT
268 bl kvmhv_accumulate_time
269#endif
Paul Mackerraseddb60f2015-03-28 14:21:11 +110027013: mr r3, r12
271 stw r12, 112-4(r1)
272 bl kvmhv_commence_exit
273 nop
274 lwz r12, 112-4(r1)
Paul Mackerras6af27c82015-03-28 14:21:10 +1100275 b kvmhv_switch_to_host
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100276
Paul Mackerras371fefd2011-06-29 00:23:08 +0000277/*
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100278 * We come in here when wakened from nap mode.
Paul Mackerras371fefd2011-06-29 00:23:08 +0000279 * Relocation is off and most register values are lost.
280 * r13 points to the PACA.
281 */
282 .globl kvm_start_guest
283kvm_start_guest:
Preeti U Murthyfd17dc72014-04-11 16:01:58 +0530284
285 /* Set runlatch bit the minute you wake up from nap */
Paul Mackerras1f09c3e2015-03-28 14:21:04 +1100286 mfspr r0, SPRN_CTRLF
287 ori r0, r0, 1
288 mtspr SPRN_CTRLT, r0
Preeti U Murthyfd17dc72014-04-11 16:01:58 +0530289
Paul Mackerras19ccb762011-07-23 17:42:46 +1000290 ld r2,PACATOC(r13)
291
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000292 li r0,KVM_HWTHREAD_IN_KVM
293 stb r0,HSTATE_HWTHREAD_STATE(r13)
294
295 /* NV GPR values from power7_idle() will no longer be valid */
296 li r0,1
297 stb r0,PACA_NAPSTATELOST(r13)
298
Paul Mackerras4619ac82013-04-17 20:31:41 +0000299 /* were we napping due to cede? */
300 lbz r0,HSTATE_NAPPING(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100301 cmpwi r0,NAPPING_CEDE
302 beq kvm_end_cede
303 cmpwi r0,NAPPING_NOVCPU
304 beq kvm_novcpu_wakeup
305
306 ld r1,PACAEMERGSP(r13)
307 subi r1,r1,STACK_FRAME_OVERHEAD
Paul Mackerras4619ac82013-04-17 20:31:41 +0000308
309 /*
310 * We weren't napping due to cede, so this must be a secondary
311 * thread being woken up to run a guest, or being woken up due
312 * to a stray IPI. (Or due to some machine check or hypervisor
313 * maintenance interrupt while the core is in KVM.)
314 */
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000315
316 /* Check the wake reason in SRR1 to see why we got here */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100317 bl kvmppc_check_wake_reason
318 cmpdi r3, 0
319 bge kvm_no_guest
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000320
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000321 /* get vcore pointer, NULL if we have nothing to run */
322 ld r5,HSTATE_KVM_VCORE(r13)
323 cmpdi r5,0
324 /* if we have no vcore to run, go back to sleep */
Paul Mackerras7b444c62012-10-15 01:16:14 +0000325 beq kvm_no_guest
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000326
Paul Mackerras56548fc2014-12-03 14:48:40 +1100327kvm_secondary_got_guest:
328
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100329 /* Set HSTATE_DSCR(r13) to something sensible */
Anshuman Khandual1db36522015-05-21 12:13:03 +0530330 ld r6, PACA_DSCR_DEFAULT(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100331 std r6, HSTATE_DSCR(r13)
Paul Mackerras371fefd2011-06-29 00:23:08 +0000332
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000333 /* On thread 0 of a subcore, set HDEC to max */
334 lbz r4, HSTATE_PTID(r13)
335 cmpwi r4, 0
336 bne 63f
337 lis r6, 0x7fff
338 ori r6, r6, 0xffff
339 mtspr SPRN_HDEC, r6
340 /* and set per-LPAR registers, if doing dynamic micro-threading */
341 ld r6, HSTATE_SPLIT_MODE(r13)
342 cmpdi r6, 0
343 beq 63f
344 ld r0, KVM_SPLIT_RPR(r6)
345 mtspr SPRN_RPR, r0
346 ld r0, KVM_SPLIT_PMMAR(r6)
347 mtspr SPRN_PMMAR, r0
348 ld r0, KVM_SPLIT_LDBAR(r6)
349 mtspr SPRN_LDBAR, r0
350 isync
35163:
352 /* Order load of vcpu after load of vcore */
Paul Mackerras5d5b99c2015-03-28 14:21:06 +1100353 lwsync
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000354 ld r4, HSTATE_KVM_VCPU(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100355 bl kvmppc_hv_entry
Paul Mackerras218309b2013-09-06 13:23:44 +1000356
357 /* Back from the guest, go back to nap */
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000358 /* Clear our vcpu and vcore pointers so we don't come back in early */
Paul Mackerras218309b2013-09-06 13:23:44 +1000359 li r0, 0
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000360 std r0, HSTATE_KVM_VCPU(r13)
Paul Mackerrasf019b7a2013-11-16 17:46:03 +1100361 /*
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000362 * Once we clear HSTATE_KVM_VCORE(r13), the code in
Paul Mackerras5d5b99c2015-03-28 14:21:06 +1100363 * kvmppc_run_core() is going to assume that all our vcpu
364 * state is visible in memory. This lwsync makes sure
365 * that that is true.
Paul Mackerrasf019b7a2013-11-16 17:46:03 +1100366 */
Paul Mackerras218309b2013-09-06 13:23:44 +1000367 lwsync
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000368 std r0, HSTATE_KVM_VCORE(r13)
Paul Mackerras218309b2013-09-06 13:23:44 +1000369
Paul Mackerras56548fc2014-12-03 14:48:40 +1100370/*
371 * At this point we have finished executing in the guest.
372 * We need to wait for hwthread_req to become zero, since
373 * we may not turn on the MMU while hwthread_req is non-zero.
374 * While waiting we also need to check if we get given a vcpu to run.
375 */
Paul Mackerras218309b2013-09-06 13:23:44 +1000376kvm_no_guest:
Paul Mackerras56548fc2014-12-03 14:48:40 +1100377 lbz r3, HSTATE_HWTHREAD_REQ(r13)
378 cmpwi r3, 0
379 bne 53f
380 HMT_MEDIUM
381 li r0, KVM_HWTHREAD_IN_KERNEL
Paul Mackerras218309b2013-09-06 13:23:44 +1000382 stb r0, HSTATE_HWTHREAD_STATE(r13)
Paul Mackerras56548fc2014-12-03 14:48:40 +1100383 /* need to recheck hwthread_req after a barrier, to avoid race */
384 sync
385 lbz r3, HSTATE_HWTHREAD_REQ(r13)
386 cmpwi r3, 0
387 bne 54f
388/*
389 * We jump to power7_wakeup_loss, which will return to the caller
390 * of power7_nap in the powernv cpu offline loop. The value we
391 * put in r3 becomes the return value for power7_nap.
392 */
Paul Mackerras218309b2013-09-06 13:23:44 +1000393 li r3, LPCR_PECE0
394 mfspr r4, SPRN_LPCR
395 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
396 mtspr SPRN_LPCR, r4
Paul Mackerras56548fc2014-12-03 14:48:40 +1100397 li r3, 0
398 b power7_wakeup_loss
399
40053: HMT_LOW
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000401 ld r5, HSTATE_KVM_VCORE(r13)
402 cmpdi r5, 0
403 bne 60f
404 ld r3, HSTATE_SPLIT_MODE(r13)
405 cmpdi r3, 0
406 beq kvm_no_guest
407 lbz r0, KVM_SPLIT_DO_NAP(r3)
408 cmpwi r0, 0
Paul Mackerras56548fc2014-12-03 14:48:40 +1100409 beq kvm_no_guest
410 HMT_MEDIUM
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000411 b kvm_unsplit_nap
41260: HMT_MEDIUM
Paul Mackerras56548fc2014-12-03 14:48:40 +1100413 b kvm_secondary_got_guest
414
41554: li r0, KVM_HWTHREAD_IN_KVM
416 stb r0, HSTATE_HWTHREAD_STATE(r13)
417 b kvm_no_guest
Paul Mackerras218309b2013-09-06 13:23:44 +1000418
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000419/*
420 * Here the primary thread is trying to return the core to
421 * whole-core mode, so we need to nap.
422 */
423kvm_unsplit_nap:
Gautham R. Shenoy7f235322015-09-02 21:48:58 +0530424 /*
425 * Ensure that secondary doesn't nap when it has
426 * its vcore pointer set.
427 */
428 sync /* matches smp_mb() before setting split_info.do_nap */
429 ld r0, HSTATE_KVM_VCORE(r13)
430 cmpdi r0, 0
431 bne kvm_no_guest
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000432 /* clear any pending message */
433BEGIN_FTR_SECTION
434 lis r6, (PPC_DBELL_SERVER << (63-36))@h
435 PPC_MSGCLR(6)
436END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
437 /* Set kvm_split_mode.napped[tid] = 1 */
438 ld r3, HSTATE_SPLIT_MODE(r13)
439 li r0, 1
440 lhz r4, PACAPACAINDEX(r13)
441 clrldi r4, r4, 61 /* micro-threading => P8 => 8 threads/core */
442 addi r4, r4, KVM_SPLIT_NAPPED
443 stbx r0, r3, r4
444 /* Check the do_nap flag again after setting napped[] */
445 sync
446 lbz r0, KVM_SPLIT_DO_NAP(r3)
447 cmpwi r0, 0
448 beq 57f
449 li r3, (LPCR_PECEDH | LPCR_PECE0) >> 4
450 mfspr r4, SPRN_LPCR
451 rlwimi r4, r3, 4, (LPCR_PECEDP | LPCR_PECEDH | LPCR_PECE0 | LPCR_PECE1)
452 mtspr SPRN_LPCR, r4
453 isync
454 std r0, HSTATE_SCRATCH0(r13)
455 ptesync
456 ld r0, HSTATE_SCRATCH0(r13)
4571: cmpd r0, r0
458 bne 1b
459 nap
460 b .
461
46257: li r0, 0
463 stbx r0, r3, r4
464 b kvm_no_guest
465
Paul Mackerras218309b2013-09-06 13:23:44 +1000466/******************************************************************************
467 * *
468 * Entry code *
469 * *
470 *****************************************************************************/
471
Paul Mackerrasde56a942011-06-29 00:21:34 +0000472.global kvmppc_hv_entry
473kvmppc_hv_entry:
474
475 /* Required state:
476 *
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100477 * R4 = vcpu pointer (or NULL)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000478 * MSR = ~IR|DR
479 * R13 = PACA
480 * R1 = host R1
Michael Neuling06a29e42014-08-19 14:59:30 +1000481 * R2 = TOC
Paul Mackerrasde56a942011-06-29 00:21:34 +0000482 * all other volatile GPRS = free
483 */
484 mflr r0
Paul Mackerras218309b2013-09-06 13:23:44 +1000485 std r0, PPC_LR_STKOFF(r1)
486 stdu r1, -112(r1)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000487
Paul Mackerrasde56a942011-06-29 00:21:34 +0000488 /* Save R1 in the PACA */
489 std r1, HSTATE_HOST_R1(r13)
490
Paul Mackerras44a3add2013-10-04 21:45:04 +1000491 li r6, KVM_GUEST_MODE_HOST_HV
492 stb r6, HSTATE_IN_GUEST(r13)
493
Paul Mackerrasb6c295d2015-03-28 14:21:02 +1100494#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
495 /* Store initial timestamp */
496 cmpdi r4, 0
497 beq 1f
498 addi r3, r4, VCPU_TB_RMENTRY
499 bl kvmhv_start_timing
5001:
501#endif
Paul Mackerrasde56a942011-06-29 00:21:34 +0000502 /* Clear out SLB */
503 li r6,0
504 slbmte r6,r6
505 slbia
506 ptesync
507
	/*
	 * POWER7/POWER8 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Set bit in entry map iff exit map is zero. */
	ld	r5, HSTATE_KVM_VCORE(r13)
	li	r7, 1
	lbz	r6, HSTATE_PTID(r13)
	sld	r7, r7, r6
	addi	r9, r5, VCORE_ENTRY_EXIT
21:	lwarx	r3, 0, r9
	cmpwi	r3, 0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	or	r3, r3, r7
	stwcx.	r3, 0, r9
	bne	21b

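	/*
	 * Illustrative note: the entry/exit word packs two per-thread
	 * bitmaps, with the entry map in the low byte and the exit map
	 * above it, so any value >= 0x100 means some thread has started
	 * to exit.  In rough C (sketch only, not generated code):
	 *
	 *	do {
	 *		old = vc->entry_exit_map;
	 *		if (old >= 0x100)
	 *			goto secondary_too_late;
	 *	} while (!compare_and_swap(&vc->entry_exit_map, old,
	 *				   old | (1 << ptid)));
	 */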
526 /* Primary thread switches to guest partition. */
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100527 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
Paul Mackerras371fefd2011-06-29 00:23:08 +0000528 cmpwi r6,0
Paul Mackerras6af27c82015-03-28 14:21:10 +1100529 bne 10f
Paul Mackerrasde56a942011-06-29 00:21:34 +0000530 ld r6,KVM_SDR1(r9)
531 lwz r7,KVM_LPID(r9)
532 li r0,LPID_RSVD /* switch to reserved LPID */
533 mtspr SPRN_LPID,r0
534 ptesync
535 mtspr SPRN_SDR1,r6 /* switch to partition page table */
536 mtspr SPRN_LPID,r7
537 isync
Paul Mackerras1b400ba2012-11-21 23:28:08 +0000538
539 /* See if we need to flush the TLB */
540 lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
541 clrldi r7,r6,64-6 /* extract bit number (6 bits) */
542 srdi r6,r6,6 /* doubleword number */
543 sldi r6,r6,3 /* address offset */
544 add r6,r6,r9
545 addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
Paul Mackerras371fefd2011-06-29 00:23:08 +0000546 li r0,1
Paul Mackerras1b400ba2012-11-21 23:28:08 +0000547 sld r0,r0,r7
548 ld r7,0(r6)
549 and. r7,r7,r0
550 beq 22f
55123: ldarx r7,0,r6 /* if set, clear the bit */
552 andc r7,r7,r0
553 stdcx. r7,0,r6
554 bne 23b
	/* Flush the TLB of any entries for this LPID */
	/* use arch 2.07S as a proxy for POWER8 */
BEGIN_FTR_SECTION
	li	r6,512			/* POWER8 has 512 sets */
FTR_SECTION_ELSE
	li	r6,128			/* POWER7 has 128 sets */
ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync

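	/*
	 * Illustrative sketch of the loop above (not generated code): with
	 * IS = 0b10, each tlbiel invalidates one congruence class for the
	 * current LPID, and the set index lives at bit 12, so:
	 *
	 *	for (set = 0; set < nsets; set++)	// 128 on P7, 512 on P8
	 *		tlbiel(0x800 | (set << 12));
	 */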
	/* Add timebase offset onto timebase */
22:	ld	r8,VCORE_TB_OFFSET(r5)
	cmpdi	r8,0
	beq	37f
	mftb	r6		/* current host timebase */
	add	r8,r8,r6
	mtspr	SPRN_TBU40,r8	/* update upper 40 bits */
	mftb	r7		/* check if lower 24 bits overflowed */
	clrldi	r6,r6,40
	clrldi	r7,r7,40
	cmpld	r7,r6
	bge	37f
	addis	r8,r8,0x100	/* if so, increment upper 40 bits */
	mtspr	SPRN_TBU40,r8

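	/*
	 * TBU40 writes only the upper 40 bits of the timebase; the low 24
	 * bits keep counting.  Roughly, in C-style pseudocode (illustrative
	 * only, set_tbu40 is a stand-in for the mtspr):
	 *
	 *	tb = mftb();
	 *	set_tbu40(tb + offset);
	 *	if ((mftb() & 0xffffff) < (tb & 0xffffff))	// low bits wrapped
	 *		set_tbu40(tb + offset + 0x1000000);
	 */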
Paul Mackerras388cc6e2013-09-21 14:35:02 +1000585 /* Load guest PCR value to select appropriate compat mode */
58637: ld r7, VCORE_PCR(r5)
587 cmpdi r7, 0
588 beq 38f
589 mtspr SPRN_PCR, r7
59038:
Michael Neulingb005255e2014-01-08 21:25:21 +1100591
592BEGIN_FTR_SECTION
593 /* DPDES is shared between threads */
594 ld r8, VCORE_DPDES(r5)
595 mtspr SPRN_DPDES, r8
596END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
597
Paul Mackerras388cc6e2013-09-21 14:35:02 +1000598 li r0,1
Paul Mackerras371fefd2011-06-29 00:23:08 +0000599 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
Paul Mackerras9e368f22011-06-29 00:40:08 +0000600
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100601 /* Do we have a guest vcpu to run? */
Paul Mackerras6af27c82015-03-28 14:21:10 +110060210: cmpdi r4, 0
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100603 beq kvmppc_primary_no_guest
604kvmppc_got_guest:
Paul Mackerrasde56a942011-06-29 00:21:34 +0000605
606 /* Load up guest SLB entries */
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100607 lwz r5,VCPU_SLB_MAX(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000608 cmpwi r5,0
609 beq 9f
610 mtctr r5
611 addi r6,r4,VCPU_SLB
6121: ld r8,VCPU_SLB_E(r6)
613 ld r9,VCPU_SLB_V(r6)
614 slbmte r9,r8
615 addi r6,r6,VCPU_SLB_SIZE
616 bdnz 1b
6179:
	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	li	r6, LPPACA_YIELDCOUNT
	LWZX_BE	r5, r3, r6
	addi	r5, r5, 1
	STWX_BE	r5, r3, r6
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:

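	/*
	 * The lppaca is a big-endian structure, so LWZX_BE/STWX_BE above
	 * byte-swap on little-endian hosts.  Roughly (illustrative only):
	 *
	 *	yc = be32_to_cpu(vpa->yield_count);
	 *	vpa->yield_count = cpu_to_be32(yc + 1);
	 */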
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100630 /* Save purr/spurr */
631 mfspr r5,SPRN_PURR
632 mfspr r6,SPRN_SPURR
633 std r5,HSTATE_PURR(r13)
634 std r6,HSTATE_SPURR(r13)
635 ld r7,VCPU_PURR(r4)
636 ld r8,VCPU_SPURR(r4)
637 mtspr SPRN_PURR,r7
638 mtspr SPRN_SPURR,r8
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100639
Michael Neulingeee7ff92014-01-08 21:25:19 +1100640BEGIN_FTR_SECTION
Paul Mackerrasde56a942011-06-29 00:21:34 +0000641 /* Set partition DABR */
642 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
Paul Mackerras8563bf52014-01-08 21:25:29 +1100643 lwz r5,VCPU_DABRX(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000644 ld r6,VCPU_DABR(r4)
645 mtspr SPRN_DABRX,r5
646 mtspr SPRN_DABR,r6
Paul Mackerrasde56a942011-06-29 00:21:34 +0000647 isync
Michael Neulingeee7ff92014-01-08 21:25:19 +1100648END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000649
Michael Neulinge4e38122014-03-25 10:47:02 +1100650#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
651BEGIN_FTR_SECTION
652 b skip_tm
653END_FTR_SECTION_IFCLR(CPU_FTR_TM)
654
655 /* Turn on TM/FP/VSX/VMX so we can restore them. */
656 mfmsr r5
657 li r6, MSR_TM >> 32
658 sldi r6, r6, 32
659 or r5, r5, r6
660 ori r5, r5, MSR_FP
661 oris r5, r5, (MSR_VEC | MSR_VSX)@h
662 mtmsrd r5
663
664 /*
665 * The user may change these outside of a transaction, so they must
666 * always be context switched.
667 */
668 ld r5, VCPU_TFHAR(r4)
669 ld r6, VCPU_TFIAR(r4)
670 ld r7, VCPU_TEXASR(r4)
671 mtspr SPRN_TFHAR, r5
672 mtspr SPRN_TFIAR, r6
673 mtspr SPRN_TEXASR, r7
674
675 ld r5, VCPU_MSR(r4)
676 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
677 beq skip_tm /* TM not active in guest */
678
	/* Make sure the failure summary is set, otherwise we'll program check
	 * when we trechkpt.  It's possible that this might not have been set
	 * on a kvmppc_set_one_reg() call but we shouldn't let this crash the
	 * host.
	 */
684 oris r7, r7, (TEXASR_FS)@h
685 mtspr SPRN_TEXASR, r7
686
687 /*
688 * We need to load up the checkpointed state for the guest.
689 * We need to do this early as it will blow away any GPRs, VSRs and
690 * some SPRs.
691 */
692
693 mr r31, r4
694 addi r3, r31, VCPU_FPRS_TM
Alexander Graf9bf163f2014-06-16 14:41:15 +0200695 bl load_fp_state
Michael Neulinge4e38122014-03-25 10:47:02 +1100696 addi r3, r31, VCPU_VRS_TM
Alexander Graf9bf163f2014-06-16 14:41:15 +0200697 bl load_vr_state
Michael Neulinge4e38122014-03-25 10:47:02 +1100698 mr r4, r31
699 lwz r7, VCPU_VRSAVE_TM(r4)
700 mtspr SPRN_VRSAVE, r7
701
702 ld r5, VCPU_LR_TM(r4)
703 lwz r6, VCPU_CR_TM(r4)
704 ld r7, VCPU_CTR_TM(r4)
705 ld r8, VCPU_AMR_TM(r4)
706 ld r9, VCPU_TAR_TM(r4)
707 mtlr r5
708 mtcr r6
709 mtctr r7
710 mtspr SPRN_AMR, r8
711 mtspr SPRN_TAR, r9
712
713 /*
714 * Load up PPR and DSCR values but don't put them in the actual SPRs
715 * till the last moment to avoid running with userspace PPR and DSCR for
716 * too long.
717 */
718 ld r29, VCPU_DSCR_TM(r4)
719 ld r30, VCPU_PPR_TM(r4)
720
721 std r2, PACATMSCRATCH(r13) /* Save TOC */
722
723 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
724 li r5, 0
725 mtmsrd r5, 1
726
727 /* Load GPRs r0-r28 */
728 reg = 0
729 .rept 29
730 ld reg, VCPU_GPRS_TM(reg)(r31)
731 reg = reg + 1
732 .endr
733
734 mtspr SPRN_DSCR, r29
735 mtspr SPRN_PPR, r30
736
737 /* Load final GPRs */
738 ld 29, VCPU_GPRS_TM(29)(r31)
739 ld 30, VCPU_GPRS_TM(30)(r31)
740 ld 31, VCPU_GPRS_TM(31)(r31)
741
742 /* TM checkpointed state is now setup. All GPRs are now volatile. */
743 TRECHKPT
744
745 /* Now let's get back the state we need. */
746 HMT_MEDIUM
747 GET_PACA(r13)
748 ld r29, HSTATE_DSCR(r13)
749 mtspr SPRN_DSCR, r29
750 ld r4, HSTATE_KVM_VCPU(r13)
751 ld r1, HSTATE_HOST_R1(r13)
752 ld r2, PACATMSCRATCH(r13)
753
754 /* Set the MSR RI since we have our registers back. */
755 li r5, MSR_RI
756 mtmsrd r5, 1
757skip_tm:
758#endif
759
Paul Mackerrasde56a942011-06-29 00:21:34 +0000760 /* Load guest PMU registers */
761 /* R4 is live here (vcpu pointer) */
762 li r3, 1
763 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
764 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
765 isync
Paul Mackerras9bc01a92014-05-26 19:48:40 +1000766BEGIN_FTR_SECTION
767 ld r3, VCPU_MMCR(r4)
768 andi. r5, r3, MMCR0_PMAO_SYNC | MMCR0_PMAO
769 cmpwi r5, MMCR0_PMAO
770 beql kvmppc_fix_pmao
771END_FTR_SECTION_IFSET(CPU_FTR_PMAO_BUG)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000772 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
773 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
774 lwz r6, VCPU_PMC + 8(r4)
775 lwz r7, VCPU_PMC + 12(r4)
776 lwz r8, VCPU_PMC + 16(r4)
777 lwz r9, VCPU_PMC + 20(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000778 mtspr SPRN_PMC1, r3
779 mtspr SPRN_PMC2, r5
780 mtspr SPRN_PMC3, r6
781 mtspr SPRN_PMC4, r7
782 mtspr SPRN_PMC5, r8
783 mtspr SPRN_PMC6, r9
Paul Mackerrasde56a942011-06-29 00:21:34 +0000784 ld r3, VCPU_MMCR(r4)
785 ld r5, VCPU_MMCR + 8(r4)
786 ld r6, VCPU_MMCR + 16(r4)
787 ld r7, VCPU_SIAR(r4)
788 ld r8, VCPU_SDAR(r4)
789 mtspr SPRN_MMCR1, r5
790 mtspr SPRN_MMCRA, r6
791 mtspr SPRN_SIAR, r7
792 mtspr SPRN_SDAR, r8
Michael Neulingb005255e2014-01-08 21:25:21 +1100793BEGIN_FTR_SECTION
794 ld r5, VCPU_MMCR + 24(r4)
795 ld r6, VCPU_SIER(r4)
796 lwz r7, VCPU_PMC + 24(r4)
797 lwz r8, VCPU_PMC + 28(r4)
798 ld r9, VCPU_MMCR + 32(r4)
799 mtspr SPRN_MMCR2, r5
800 mtspr SPRN_SIER, r6
801 mtspr SPRN_SPMC1, r7
802 mtspr SPRN_SPMC2, r8
803 mtspr SPRN_MMCRS, r9
804END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000805 mtspr SPRN_MMCR0, r3
806 isync
807
808 /* Load up FP, VMX and VSX registers */
809 bl kvmppc_load_fp
810
811 ld r14, VCPU_GPR(R14)(r4)
812 ld r15, VCPU_GPR(R15)(r4)
813 ld r16, VCPU_GPR(R16)(r4)
814 ld r17, VCPU_GPR(R17)(r4)
815 ld r18, VCPU_GPR(R18)(r4)
816 ld r19, VCPU_GPR(R19)(r4)
817 ld r20, VCPU_GPR(R20)(r4)
818 ld r21, VCPU_GPR(R21)(r4)
819 ld r22, VCPU_GPR(R22)(r4)
820 ld r23, VCPU_GPR(R23)(r4)
821 ld r24, VCPU_GPR(R24)(r4)
822 ld r25, VCPU_GPR(R25)(r4)
823 ld r26, VCPU_GPR(R26)(r4)
824 ld r27, VCPU_GPR(R27)(r4)
825 ld r28, VCPU_GPR(R28)(r4)
826 ld r29, VCPU_GPR(R29)(r4)
827 ld r30, VCPU_GPR(R30)(r4)
828 ld r31, VCPU_GPR(R31)(r4)
829
Paul Mackerrasde56a942011-06-29 00:21:34 +0000830 /* Switch DSCR to guest value */
831 ld r5, VCPU_DSCR(r4)
832 mtspr SPRN_DSCR, r5
Paul Mackerrasde56a942011-06-29 00:21:34 +0000833
Michael Neulingb005255e2014-01-08 21:25:21 +1100834BEGIN_FTR_SECTION
Paul Mackerrasc17b98c2014-12-03 13:30:38 +1100835 /* Skip next section on POWER7 */
Michael Neulingb005255e2014-01-08 21:25:21 +1100836 b 8f
837END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
838 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
839 mfmsr r8
840 li r0, 1
841 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
842 mtmsrd r8
843
844 /* Load up POWER8-specific registers */
845 ld r5, VCPU_IAMR(r4)
846 lwz r6, VCPU_PSPB(r4)
847 ld r7, VCPU_FSCR(r4)
848 mtspr SPRN_IAMR, r5
849 mtspr SPRN_PSPB, r6
850 mtspr SPRN_FSCR, r7
851 ld r5, VCPU_DAWR(r4)
852 ld r6, VCPU_DAWRX(r4)
853 ld r7, VCPU_CIABR(r4)
854 ld r8, VCPU_TAR(r4)
855 mtspr SPRN_DAWR, r5
856 mtspr SPRN_DAWRX, r6
857 mtspr SPRN_CIABR, r7
858 mtspr SPRN_TAR, r8
859 ld r5, VCPU_IC(r4)
860 ld r6, VCPU_VTB(r4)
861 mtspr SPRN_IC, r5
862 mtspr SPRN_VTB, r6
Michael Neuling7b490412014-01-08 21:25:32 +1100863 ld r8, VCPU_EBBHR(r4)
Michael Neulingb005255e2014-01-08 21:25:21 +1100864 mtspr SPRN_EBBHR, r8
865 ld r5, VCPU_EBBRR(r4)
866 ld r6, VCPU_BESCR(r4)
867 ld r7, VCPU_CSIGR(r4)
868 ld r8, VCPU_TACR(r4)
869 mtspr SPRN_EBBRR, r5
870 mtspr SPRN_BESCR, r6
871 mtspr SPRN_CSIGR, r7
872 mtspr SPRN_TACR, r8
873 ld r5, VCPU_TCSCR(r4)
874 ld r6, VCPU_ACOP(r4)
875 lwz r7, VCPU_GUEST_PID(r4)
876 ld r8, VCPU_WORT(r4)
877 mtspr SPRN_TCSCR, r5
878 mtspr SPRN_ACOP, r6
879 mtspr SPRN_PID, r7
880 mtspr SPRN_WORT, r8
8818:
882
	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	/* r8 is a host timebase value here, convert to guest TB */
	ld	r5,HSTATE_KVM_VCORE(r13)
	ld	r6,VCORE_TB_OFFSET(r5)
	add	r8,r8,r6
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)

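	/*
	 * C-style sketch of the DEC programming above (illustrative only):
	 * dec_expires is kept in host timebase units, so the vcore's
	 * timebase offset is added to get guest TB before taking the
	 * difference.
	 *
	 *	mtspr(SPRN_DEC, (vcpu->dec_expires + vc->tb_offset) - mftb());
	 */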
896 ld r5, VCPU_SPRG0(r4)
897 ld r6, VCPU_SPRG1(r4)
898 ld r7, VCPU_SPRG2(r4)
899 ld r8, VCPU_SPRG3(r4)
900 mtspr SPRN_SPRG0, r5
901 mtspr SPRN_SPRG1, r6
902 mtspr SPRN_SPRG2, r7
903 mtspr SPRN_SPRG3, r8
904
Paul Mackerrasde56a942011-06-29 00:21:34 +0000905 /* Load up DAR and DSISR */
906 ld r5, VCPU_DAR(r4)
907 lwz r6, VCPU_DSISR(r4)
908 mtspr SPRN_DAR, r5
909 mtspr SPRN_DSISR, r6
910
Paul Mackerrasde56a942011-06-29 00:21:34 +0000911 /* Restore AMR and UAMOR, set AMOR to all 1s */
912 ld r5,VCPU_AMR(r4)
913 ld r6,VCPU_UAMOR(r4)
914 li r7,-1
915 mtspr SPRN_AMR,r5
916 mtspr SPRN_UAMOR,r6
917 mtspr SPRN_AMOR,r7
Paul Mackerrasde56a942011-06-29 00:21:34 +0000918
919 /* Restore state of CTRL run bit; assume 1 on entry */
920 lwz r5,VCPU_CTRL(r4)
921 andi. r5,r5,1
922 bne 4f
923 mfspr r6,SPRN_CTRLF
924 clrrdi r6,r6,1
925 mtspr SPRN_CTRLT,r6
9264:
Paul Mackerras6af27c82015-03-28 14:21:10 +1100927 /* Secondary threads wait for primary to have done partition switch */
928 ld r5, HSTATE_KVM_VCORE(r13)
929 lbz r6, HSTATE_PTID(r13)
930 cmpwi r6, 0
931 beq 21f
932 lbz r0, VCORE_IN_GUEST(r5)
933 cmpwi r0, 0
934 bne 21f
935 HMT_LOW
Paul Mackerrasb4deba52015-07-02 20:38:16 +100093620: lwz r3, VCORE_ENTRY_EXIT(r5)
937 cmpwi r3, 0x100
938 bge no_switch_exit
939 lbz r0, VCORE_IN_GUEST(r5)
Paul Mackerras6af27c82015-03-28 14:21:10 +1100940 cmpwi r0, 0
941 beq 20b
942 HMT_MEDIUM
94321:
944 /* Set LPCR. */
945 ld r8,VCORE_LPCR(r5)
946 mtspr SPRN_LPCR,r8
947 isync
948
	/* Check if HDEC expires soon */
	mfspr	r3, SPRN_HDEC
	cmpwi	r3, 512		/* 1 microsecond */
	blt	hdec_soon

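	/*
	 * Note: the hypervisor decrementer counts at the timebase rate
	 * (512 MHz on these machines), so 512 ticks is roughly 1us.  If
	 * less than that remains before HDEC fires, entering the guest
	 * isn't worthwhile and we take the hdec_soon exit instead.
	 */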
Paul Mackerrasde56a942011-06-29 00:21:34 +0000954 ld r6, VCPU_CTR(r4)
Sam bobroffc63517c2015-05-27 09:56:57 +1000955 ld r7, VCPU_XER(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000956
957 mtctr r6
958 mtxer r7
959
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100960kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
Paul Mackerras4619ac82013-04-17 20:31:41 +0000961 ld r10, VCPU_PC(r4)
962 ld r11, VCPU_MSR(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000963 ld r6, VCPU_SRR0(r4)
964 ld r7, VCPU_SRR1(r4)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100965 mtspr SPRN_SRR0, r6
966 mtspr SPRN_SRR1, r7
Paul Mackerrasde56a942011-06-29 00:21:34 +0000967
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100968deliver_guest_interrupt:
Paul Mackerras4619ac82013-04-17 20:31:41 +0000969 /* r11 = vcpu->arch.msr & ~MSR_HV */
Paul Mackerrasde56a942011-06-29 00:21:34 +0000970 rldicl r11, r11, 63 - MSR_HV_LG, 1
971 rotldi r11, r11, 1 + MSR_HV_LG
972 ori r11, r11, MSR_ME
973
Paul Mackerras19ccb762011-07-23 17:42:46 +1000974 /* Check if we can deliver an external or decrementer interrupt now */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100975 ld r0, VCPU_PENDING_EXC(r4)
976 rldicl r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
977 cmpdi cr1, r0, 0
978 andi. r8, r11, MSR_EE
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100979 mfspr r8, SPRN_LPCR
980 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
981 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
982 mtspr SPRN_LPCR, r8
Paul Mackerras19ccb762011-07-23 17:42:46 +1000983 isync
Paul Mackerras19ccb762011-07-23 17:42:46 +1000984 beq 5f
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100985 li r0, BOOK3S_INTERRUPT_EXTERNAL
986 bne cr1, 12f
987 mfspr r0, SPRN_DEC
988 cmpwi r0, 0
989 li r0, BOOK3S_INTERRUPT_DECREMENTER
990 bge 5f
991
99212: mtspr SPRN_SRR0, r10
Paul Mackerras19ccb762011-07-23 17:42:46 +1000993 mr r10,r0
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100994 mtspr SPRN_SRR1, r11
Michael Neulinge4e38122014-03-25 10:47:02 +1100995 mr r9, r4
996 bl kvmppc_msr_interrupt
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11009975:
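	/*
	 * Note on the LPCR update above: if the guest has a pending
	 * (level-triggered) external interrupt but cannot take it yet
	 * because MSR_EE is clear, the pending bit is mirrored into
	 * LPCR[MER] (mediated external request), so an external interrupt
	 * is raised as soon as the guest re-enables MSR_EE instead of the
	 * event being missed.
	 */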
Paul Mackerras19ccb762011-07-23 17:42:46 +1000998
Liu Ping Fan27025a62013-11-19 14:12:48 +0800999/*
1000 * Required state:
1001 * R4 = vcpu
1002 * R10: value for HSRR0
1003 * R11: value for HSRR1
1004 * R13 = PACA
1005 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001006fast_guest_return:
Paul Mackerras4619ac82013-04-17 20:31:41 +00001007 li r0,0
1008 stb r0,VCPU_CEDED(r4) /* cancel cede */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001009 mtspr SPRN_HSRR0,r10
1010 mtspr SPRN_HSRR1,r11
1011
1012 /* Activate guest mode, so faults get handled by KVM */
Paul Mackerras44a3add2013-10-04 21:45:04 +10001013 li r9, KVM_GUEST_MODE_GUEST_HV
Paul Mackerrasde56a942011-06-29 00:21:34 +00001014 stb r9, HSTATE_IN_GUEST(r13)
1015
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001016#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1017 /* Accumulate timing */
1018 addi r3, r4, VCPU_TB_GUEST
1019 bl kvmhv_accumulate_time
1020#endif
1021
Paul Mackerrasde56a942011-06-29 00:21:34 +00001022 /* Enter guest */
1023
Paul Mackerras0acb9112013-02-04 18:10:51 +00001024BEGIN_FTR_SECTION
1025 ld r5, VCPU_CFAR(r4)
1026 mtspr SPRN_CFAR, r5
1027END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
Paul Mackerras4b8473c2013-09-20 14:52:39 +10001028BEGIN_FTR_SECTION
1029 ld r0, VCPU_PPR(r4)
1030END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Paul Mackerras0acb9112013-02-04 18:10:51 +00001031
Paul Mackerrasde56a942011-06-29 00:21:34 +00001032 ld r5, VCPU_LR(r4)
1033 lwz r6, VCPU_CR(r4)
1034 mtlr r5
1035 mtcr r6
1036
Michael Neulingc75df6f2012-06-25 13:33:10 +00001037 ld r1, VCPU_GPR(R1)(r4)
1038 ld r2, VCPU_GPR(R2)(r4)
1039 ld r3, VCPU_GPR(R3)(r4)
1040 ld r5, VCPU_GPR(R5)(r4)
1041 ld r6, VCPU_GPR(R6)(r4)
1042 ld r7, VCPU_GPR(R7)(r4)
1043 ld r8, VCPU_GPR(R8)(r4)
1044 ld r9, VCPU_GPR(R9)(r4)
1045 ld r10, VCPU_GPR(R10)(r4)
1046 ld r11, VCPU_GPR(R11)(r4)
1047 ld r12, VCPU_GPR(R12)(r4)
1048 ld r13, VCPU_GPR(R13)(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001049
Paul Mackerras4b8473c2013-09-20 14:52:39 +10001050BEGIN_FTR_SECTION
1051 mtspr SPRN_PPR, r0
1052END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
1053 ld r0, VCPU_GPR(R0)(r4)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001054 ld r4, VCPU_GPR(R4)(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001055
1056 hrfid
1057 b .
1058
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001059secondary_too_late:
Paul Mackerras6af27c82015-03-28 14:21:10 +11001060 li r12, 0
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001061 cmpdi r4, 0
1062 beq 11f
Paul Mackerras6af27c82015-03-28 14:21:10 +11001063 stw r12, VCPU_TRAP(r4)
1064#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001065 addi r3, r4, VCPU_TB_RMEXIT
1066 bl kvmhv_accumulate_time
Paul Mackerras6af27c82015-03-28 14:21:10 +11001067#endif
Paul Mackerrasb6c295d2015-03-28 14:21:02 +1100106811: b kvmhv_switch_to_host
1069
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001070no_switch_exit:
1071 HMT_MEDIUM
1072 li r12, 0
1073 b 12f
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001074hdec_soon:
Paul Mackerras6af27c82015-03-28 14:21:10 +11001075 li r12, BOOK3S_INTERRUPT_HV_DECREMENTER
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000107612: stw r12, VCPU_TRAP(r4)
Paul Mackerras6af27c82015-03-28 14:21:10 +11001077 mr r9, r4
1078#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001079 addi r3, r4, VCPU_TB_RMEXIT
1080 bl kvmhv_accumulate_time
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001081#endif
Paul Mackerras6af27c82015-03-28 14:21:10 +11001082 b guest_exit_cont
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001083
Paul Mackerrasde56a942011-06-29 00:21:34 +00001084/******************************************************************************
1085 * *
1086 * Exit code *
1087 * *
1088 *****************************************************************************/
1089
1090/*
1091 * We come here from the first-level interrupt handlers.
1092 */
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +05301093 .globl kvmppc_interrupt_hv
1094kvmppc_interrupt_hv:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001095 /*
1096 * Register contents:
1097 * R12 = interrupt vector
1098 * R13 = PACA
1099 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
1100 * guest R13 saved in SPRN_SCRATCH0
1101 */
Aneesh Kumar K.V36e7bb32013-11-11 19:29:47 +05301102 std r9, HSTATE_SCRATCH2(r13)
Paul Mackerras44a3add2013-10-04 21:45:04 +10001103
1104 lbz r9, HSTATE_IN_GUEST(r13)
1105 cmpwi r9, KVM_GUEST_MODE_HOST_HV
1106 beq kvmppc_bad_host_intr
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +05301107#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
1108 cmpwi r9, KVM_GUEST_MODE_GUEST
Aneesh Kumar K.V36e7bb32013-11-11 19:29:47 +05301109 ld r9, HSTATE_SCRATCH2(r13)
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +05301110 beq kvmppc_interrupt_pr
1111#endif
Paul Mackerras44a3add2013-10-04 21:45:04 +10001112 /* We're now back in the host but in guest MMU context */
1113 li r9, KVM_GUEST_MODE_HOST_HV
1114 stb r9, HSTATE_IN_GUEST(r13)
1115
Paul Mackerrasde56a942011-06-29 00:21:34 +00001116 ld r9, HSTATE_KVM_VCPU(r13)
1117
1118 /* Save registers */
1119
Michael Neulingc75df6f2012-06-25 13:33:10 +00001120 std r0, VCPU_GPR(R0)(r9)
1121 std r1, VCPU_GPR(R1)(r9)
1122 std r2, VCPU_GPR(R2)(r9)
1123 std r3, VCPU_GPR(R3)(r9)
1124 std r4, VCPU_GPR(R4)(r9)
1125 std r5, VCPU_GPR(R5)(r9)
1126 std r6, VCPU_GPR(R6)(r9)
1127 std r7, VCPU_GPR(R7)(r9)
1128 std r8, VCPU_GPR(R8)(r9)
Aneesh Kumar K.V36e7bb32013-11-11 19:29:47 +05301129 ld r0, HSTATE_SCRATCH2(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001130 std r0, VCPU_GPR(R9)(r9)
1131 std r10, VCPU_GPR(R10)(r9)
1132 std r11, VCPU_GPR(R11)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001133 ld r3, HSTATE_SCRATCH0(r13)
1134 lwz r4, HSTATE_SCRATCH1(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001135 std r3, VCPU_GPR(R12)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001136 stw r4, VCPU_CR(r9)
Paul Mackerras0acb9112013-02-04 18:10:51 +00001137BEGIN_FTR_SECTION
1138 ld r3, HSTATE_CFAR(r13)
1139 std r3, VCPU_CFAR(r9)
1140END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
Paul Mackerras4b8473c2013-09-20 14:52:39 +10001141BEGIN_FTR_SECTION
1142 ld r4, HSTATE_PPR(r13)
1143 std r4, VCPU_PPR(r9)
1144END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001145
1146 /* Restore R1/R2 so we can handle faults */
1147 ld r1, HSTATE_HOST_R1(r13)
1148 ld r2, PACATOC(r13)
1149
1150 mfspr r10, SPRN_SRR0
1151 mfspr r11, SPRN_SRR1
1152 std r10, VCPU_SRR0(r9)
1153 std r11, VCPU_SRR1(r9)
1154 andi. r0, r12, 2 /* need to read HSRR0/1? */
1155 beq 1f
1156 mfspr r10, SPRN_HSRR0
1157 mfspr r11, SPRN_HSRR1
1158 clrrdi r12, r12, 2
11591: std r10, VCPU_PC(r9)
1160 std r11, VCPU_MSR(r9)
1161
1162 GET_SCRATCH0(r3)
1163 mflr r4
Michael Neulingc75df6f2012-06-25 13:33:10 +00001164 std r3, VCPU_GPR(R13)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001165 std r4, VCPU_LR(r9)
1166
Paul Mackerrasde56a942011-06-29 00:21:34 +00001167 stw r12,VCPU_TRAP(r9)
1168
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001169#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1170 addi r3, r9, VCPU_TB_RMINTR
1171 mr r4, r9
1172 bl kvmhv_accumulate_time
1173 ld r5, VCPU_GPR(R5)(r9)
1174 ld r6, VCPU_GPR(R6)(r9)
1175 ld r7, VCPU_GPR(R7)(r9)
1176 ld r8, VCPU_GPR(R8)(r9)
1177#endif
1178
Paul Mackerras4a157d62014-12-03 13:30:39 +11001179 /* Save HEIR (HV emulation assist reg) in emul_inst
Paul Mackerras697d3892011-12-12 12:36:37 +00001180 if this is an HEI (HV emulation interrupt, e40) */
1181 li r3,KVM_INST_FETCH_FAILED
Paul Mackerras2bf27602015-03-20 20:39:40 +11001182 stw r3,VCPU_LAST_INST(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001183 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1184 bne 11f
1185 mfspr r3,SPRN_HEIR
Paul Mackerras4a157d62014-12-03 13:30:39 +1100118611: stw r3,VCPU_HEIR(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001187
1188 /* these are volatile across C function calls */
1189 mfctr r3
1190 mfxer r4
1191 std r3, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10001192 std r4, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001193
Paul Mackerras697d3892011-12-12 12:36:37 +00001194 /* If this is a page table miss then see if it's theirs or ours */
1195 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1196 beq kvmppc_hdsi
Paul Mackerras342d3db2011-12-12 12:38:05 +00001197 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1198 beq kvmppc_hisi
Paul Mackerras697d3892011-12-12 12:36:37 +00001199
Paul Mackerrasde56a942011-06-29 00:21:34 +00001200 /* See if this is a leftover HDEC interrupt */
1201 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1202 bne 2f
1203 mfspr r3,SPRN_HDEC
1204 cmpwi r3,0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001205 mr r4,r9
1206 bge fast_guest_return
Paul Mackerrasde56a942011-06-29 00:21:34 +000012072:
Paul Mackerras697d3892011-12-12 12:36:37 +00001208 /* See if this is an hcall we can handle in real mode */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001209 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1210 beq hcall_try_real_mode
Paul Mackerrasde56a942011-06-29 00:21:34 +00001211
Paul Mackerras66feed62015-03-28 14:21:12 +11001212 /* Hypervisor doorbell - exit only if host IPI flag set */
1213 cmpwi r12, BOOK3S_INTERRUPT_H_DOORBELL
1214 bne 3f
1215 lbz r0, HSTATE_HOST_IPI(r13)
Gautham R. Shenoy06554d92015-08-07 17:41:20 +05301216 cmpwi r0, 0
Paul Mackerras66feed62015-03-28 14:21:12 +11001217 beq 4f
1218 b guest_exit_cont
12193:
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001220 /* External interrupt ? */
1221 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001222 bne+ guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001223
1224 /* External interrupt, first check for host_ipi. If this is
1225 * set, we know the host wants us out so let's do it now
1226 */
Paul Mackerrasc9342432013-09-06 13:24:13 +10001227 bl kvmppc_read_intr
1228 cmpdi r3, 0
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001229 bgt guest_exit_cont
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001230
Paul Mackerras4619ac82013-04-17 20:31:41 +00001231 /* Check if any CPU is heading out to the host, if so head out too */
Paul Mackerras66feed62015-03-28 14:21:12 +110012324: ld r5, HSTATE_KVM_VCORE(r13)
Paul Mackerras4619ac82013-04-17 20:31:41 +00001233 lwz r0, VCORE_ENTRY_EXIT(r5)
1234 cmpwi r0, 0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11001235 mr r4, r9
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001236 blt deliver_guest_interrupt
Paul Mackerrasde56a942011-06-29 00:21:34 +00001237
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001238guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001239 /* Save more register state */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001240 mfdar r6
1241 mfdsisr r7
Paul Mackerrasde56a942011-06-29 00:21:34 +00001242 std r6, VCPU_DAR(r9)
1243 stw r7, VCPU_DSISR(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001244 /* don't overwrite fault_dar/fault_dsisr if HDSI */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001245 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
Paul Mackerras6af27c82015-03-28 14:21:10 +11001246 beq mc_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00001247 std r6, VCPU_FAULT_DAR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001248 stw r7, VCPU_FAULT_DSISR(r9)
1249
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001250 /* See if it is a machine check */
1251 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1252 beq machine_check_realmode
1253mc_cont:
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001254#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1255 addi r3, r9, VCPU_TB_RMEXIT
1256 mr r4, r9
1257 bl kvmhv_accumulate_time
1258#endif
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001259
Gautham R. Shenoy7e022e72015-05-21 13:57:04 +05301260 mr r3, r12
Paul Mackerras6af27c82015-03-28 14:21:10 +11001261 /* Increment exit count, poke other threads to exit */
1262 bl kvmhv_commence_exit
Paul Mackerraseddb60f2015-03-28 14:21:11 +11001263 nop
1264 ld r9, HSTATE_KVM_VCPU(r13)
1265 lwz r12, VCPU_TRAP(r9)
Paul Mackerras6af27c82015-03-28 14:21:10 +11001266
Paul Mackerrasec257162015-06-24 21:18:03 +10001267 /* Stop others sending VCPU interrupts to this physical CPU */
1268 li r0, -1
1269 stw r0, VCPU_CPU(r9)
1270 stw r0, VCPU_THREAD_CPU(r9)
1271
Paul Mackerrasde56a942011-06-29 00:21:34 +00001272 /* Save guest CTRL register, set runlatch to 1 */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001273 mfspr r6,SPRN_CTRLF
Paul Mackerrasde56a942011-06-29 00:21:34 +00001274 stw r6,VCPU_CTRL(r9)
1275 andi. r0,r6,1
1276 bne 4f
1277 ori r6,r6,1
1278 mtspr SPRN_CTRLT,r6
12794:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

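	/*
	 * Rough C equivalent of the SLB save loop above (illustrative
	 * only, field names hypothetical): copy out the valid entries
	 * and record how many there are.
	 *
	 *	for (i = 0, n = 0; i < slb_nr; i++) {
	 *		esid = slbmfee(i);
	 *		if (esid & SLB_ESID_V) {
	 *			vcpu->slb[n].orige = esid | i;
	 *			vcpu->slb[n].origv = slbmfev(i);
	 *			n++;
	 *		}
	 *	}
	 *	vcpu->slb_max = n;
	 */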
1299 /*
1300 * Save the guest PURR/SPURR
1301 */
1302 mfspr r5,SPRN_PURR
1303 mfspr r6,SPRN_SPURR
1304 ld r7,VCPU_PURR(r9)
1305 ld r8,VCPU_SPURR(r9)
1306 std r5,VCPU_PURR(r9)
1307 std r6,VCPU_SPURR(r9)
1308 subf r5,r7,r5
1309 subf r6,r8,r6
1310
1311 /*
1312 * Restore host PURR/SPURR and add guest times
1313 * so that the time in the guest gets accounted.
1314 */
1315 ld r3,HSTATE_PURR(r13)
1316 ld r4,HSTATE_SPURR(r13)
1317 add r3,r3,r5
1318 add r4,r4,r6
1319 mtspr SPRN_PURR,r3
1320 mtspr SPRN_SPURR,r4
1321
	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	/* r5 is a guest timebase value here, convert to host TB */
	ld	r3,HSTATE_KVM_VCORE(r13)
	ld	r4,VCORE_TB_OFFSET(r3)
	subf	r5,r4,r5
	std	r5,VCPU_DEC_EXPIRES(r9)

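	/*
	 * C-style sketch of the DEC save above (illustrative only): the
	 * expiry is converted back from guest to host timebase units, the
	 * inverse of the conversion done on guest entry.
	 *
	 *	vcpu->dec_expires = (s32)mfspr(SPRN_DEC) + mftb() - vc->tb_offset;
	 */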
Michael Neulingb005255e2014-01-08 21:25:21 +11001333BEGIN_FTR_SECTION
1334 b 8f
1335END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Michael Neulingb005255e2014-01-08 21:25:21 +11001336 /* Save POWER8-specific registers */
1337 mfspr r5, SPRN_IAMR
1338 mfspr r6, SPRN_PSPB
1339 mfspr r7, SPRN_FSCR
1340 std r5, VCPU_IAMR(r9)
1341 stw r6, VCPU_PSPB(r9)
1342 std r7, VCPU_FSCR(r9)
1343 mfspr r5, SPRN_IC
1344 mfspr r6, SPRN_VTB
1345 mfspr r7, SPRN_TAR
1346 std r5, VCPU_IC(r9)
1347 std r6, VCPU_VTB(r9)
1348 std r7, VCPU_TAR(r9)
Michael Neuling7b490412014-01-08 21:25:32 +11001349 mfspr r8, SPRN_EBBHR
Michael Neulingb005255e2014-01-08 21:25:21 +11001350 std r8, VCPU_EBBHR(r9)
1351 mfspr r5, SPRN_EBBRR
1352 mfspr r6, SPRN_BESCR
1353 mfspr r7, SPRN_CSIGR
1354 mfspr r8, SPRN_TACR
1355 std r5, VCPU_EBBRR(r9)
1356 std r6, VCPU_BESCR(r9)
1357 std r7, VCPU_CSIGR(r9)
1358 std r8, VCPU_TACR(r9)
1359 mfspr r5, SPRN_TCSCR
1360 mfspr r6, SPRN_ACOP
1361 mfspr r7, SPRN_PID
1362 mfspr r8, SPRN_WORT
1363 std r5, VCPU_TCSCR(r9)
1364 std r6, VCPU_ACOP(r9)
1365 stw r7, VCPU_GUEST_PID(r9)
1366 std r8, VCPU_WORT(r9)
13678:
1368
Paul Mackerrasde56a942011-06-29 00:21:34 +00001369 /* Save and reset AMR and UAMOR before turning on the MMU */
1370 mfspr r5,SPRN_AMR
1371 mfspr r6,SPRN_UAMOR
1372 std r5,VCPU_AMR(r9)
1373 std r6,VCPU_UAMOR(r9)
1374 li r6,0
1375 mtspr SPRN_AMR,r6
1376
Paul Mackerrasde56a942011-06-29 00:21:34 +00001377 /* Switch DSCR back to host value */
1378 mfspr r8, SPRN_DSCR
1379 ld r7, HSTATE_DSCR(r13)
Paul Mackerrascfc86022013-09-21 09:53:28 +10001380 std r8, VCPU_DSCR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001381 mtspr SPRN_DSCR, r7
1382
1383 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001384 std r14, VCPU_GPR(R14)(r9)
1385 std r15, VCPU_GPR(R15)(r9)
1386 std r16, VCPU_GPR(R16)(r9)
1387 std r17, VCPU_GPR(R17)(r9)
1388 std r18, VCPU_GPR(R18)(r9)
1389 std r19, VCPU_GPR(R19)(r9)
1390 std r20, VCPU_GPR(R20)(r9)
1391 std r21, VCPU_GPR(R21)(r9)
1392 std r22, VCPU_GPR(R22)(r9)
1393 std r23, VCPU_GPR(R23)(r9)
1394 std r24, VCPU_GPR(R24)(r9)
1395 std r25, VCPU_GPR(R25)(r9)
1396 std r26, VCPU_GPR(R26)(r9)
1397 std r27, VCPU_GPR(R27)(r9)
1398 std r28, VCPU_GPR(R28)(r9)
1399 std r29, VCPU_GPR(R29)(r9)
1400 std r30, VCPU_GPR(R30)(r9)
1401 std r31, VCPU_GPR(R31)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001402
1403 /* Save SPRGs */
1404 mfspr r3, SPRN_SPRG0
1405 mfspr r4, SPRN_SPRG1
1406 mfspr r5, SPRN_SPRG2
1407 mfspr r6, SPRN_SPRG3
1408 std r3, VCPU_SPRG0(r9)
1409 std r4, VCPU_SPRG1(r9)
1410 std r5, VCPU_SPRG2(r9)
1411 std r6, VCPU_SPRG3(r9)
1412
Paul Mackerras89436332012-03-02 01:38:23 +00001413 /* save FP state */
1414 mr r3, r9
Paul Mackerras595e4f72013-10-15 20:43:04 +11001415 bl kvmppc_save_fp
Paul Mackerras89436332012-03-02 01:38:23 +00001416
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001417#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
1418BEGIN_FTR_SECTION
1419 b 2f
1420END_FTR_SECTION_IFCLR(CPU_FTR_TM)
1421 /* Turn on TM. */
1422 mfmsr r8
1423 li r0, 1
1424 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1425 mtmsrd r8
1426
1427 ld r5, VCPU_MSR(r9)
1428 rldicl. r5, r5, 64 - MSR_TS_S_LG, 62
1429 beq 1f /* TM not active in guest. */
1430
1431 li r3, TM_CAUSE_KVM_RESCHED
1432
1433 /* Clear the MSR RI since r1, r13 are all going to be foobar. */
1434 li r5, 0
1435 mtmsrd r5, 1
1436
1437 /* All GPRs are volatile at this point. */
1438 TRECLAIM(R3)
1439
1440 /* Temporarily store r13 and r9 so we have some regs to play with */
1441 SET_SCRATCH0(r13)
1442 GET_PACA(r13)
1443 std r9, PACATMSCRATCH(r13)
1444 ld r9, HSTATE_KVM_VCPU(r13)
1445
1446 /* Get a few more GPRs free. */
1447 std r29, VCPU_GPRS_TM(29)(r9)
1448 std r30, VCPU_GPRS_TM(30)(r9)
1449 std r31, VCPU_GPRS_TM(31)(r9)
1450
1451 /* Save away PPR and DSCR soon so don't run with user values. */
1452 mfspr r31, SPRN_PPR
1453 HMT_MEDIUM
1454 mfspr r30, SPRN_DSCR
1455 ld r29, HSTATE_DSCR(r13)
1456 mtspr SPRN_DSCR, r29
1457
1458 /* Save all but r9, r13 & r29-r31 */
1459 reg = 0
1460 .rept 29
1461 .if (reg != 9) && (reg != 13)
1462 std reg, VCPU_GPRS_TM(reg)(r9)
1463 .endif
1464 reg = reg + 1
1465 .endr
1466 /* ... now save r13 */
1467 GET_SCRATCH0(r4)
1468 std r4, VCPU_GPRS_TM(13)(r9)
1469 /* ... and save r9 */
1470 ld r4, PACATMSCRATCH(r13)
1471 std r4, VCPU_GPRS_TM(9)(r9)
1472
1473 /* Reload stack pointer and TOC. */
1474 ld r1, HSTATE_HOST_R1(r13)
1475 ld r2, PACATOC(r13)
1476
1477 /* Set MSR RI now we have r1 and r13 back. */
1478 li r5, MSR_RI
1479 mtmsrd r5, 1
1480
	/* Save away checkpointed SPRs. */
1482 std r31, VCPU_PPR_TM(r9)
1483 std r30, VCPU_DSCR_TM(r9)
1484 mflr r5
1485 mfcr r6
1486 mfctr r7
1487 mfspr r8, SPRN_AMR
1488 mfspr r10, SPRN_TAR
1489 std r5, VCPU_LR_TM(r9)
1490 stw r6, VCPU_CR_TM(r9)
1491 std r7, VCPU_CTR_TM(r9)
1492 std r8, VCPU_AMR_TM(r9)
1493 std r10, VCPU_TAR_TM(r9)
1494
1495 /* Restore r12 as trap number. */
1496 lwz r12, VCPU_TRAP(r9)
1497
1498 /* Save FP/VSX. */
1499 addi r3, r9, VCPU_FPRS_TM
Alexander Graf9bf163f2014-06-16 14:41:15 +02001500 bl store_fp_state
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001501 addi r3, r9, VCPU_VRS_TM
Alexander Graf9bf163f2014-06-16 14:41:15 +02001502 bl store_vr_state
Paul Mackerras0a8ecce2014-04-14 08:56:26 +10001503 mfspr r6, SPRN_VRSAVE
1504 stw r6, VCPU_VRSAVE_TM(r9)
15051:
1506 /*
1507 * We need to save these SPRs after the treclaim so that the software
1508 * error code is recorded correctly in the TEXASR. Also the user may
1509 * change these outside of a transaction, so they must always be
1510 * context switched.
1511 */
1512 mfspr r5, SPRN_TFHAR
1513 mfspr r6, SPRN_TFIAR
1514 mfspr r7, SPRN_TEXASR
1515 std r5, VCPU_TFHAR(r9)
1516 std r6, VCPU_TFIAR(r9)
1517 std r7, VCPU_TEXASR(r9)
15182:
1519#endif
1520
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001521 /* Increment yield count if they have a VPA */
1522 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1523 cmpdi r8, 0
1524 beq 25f
Alexander Graf0865a582014-06-11 10:36:17 +02001525 li r4, LPPACA_YIELDCOUNT
1526 LWZX_BE r3, r8, r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001527 addi r3, r3, 1
Alexander Graf0865a582014-06-11 10:36:17 +02001528 STWX_BE r3, r8, r4
Paul Mackerrasc35635e2013-04-18 19:51:04 +00001529 li r3, 1
1530 stb r3, VCPU_VPA_DIRTY(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +0000153125:
1532 /* Save PMU registers if requested */
1533 /* r8 and cr0.eq are live here */
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001534BEGIN_FTR_SECTION
1535 /*
1536 * POWER8 seems to have a hardware bug where setting
1537 * MMCR0[PMAE] along with MMCR0[PMC1CE] and/or MMCR0[PMCjCE]
1538 * when some counters are already negative doesn't seem
1539 * to cause a performance monitor alert (and hence interrupt).
1540 * The effect of this is that when saving the PMU state,
1541 * if there is no PMU alert pending when we read MMCR0
1542 * before freezing the counters, but one becomes pending
1543 * before we read the counters, we lose it.
1544 * To work around this, we need a way to freeze the counters
1545 * before reading MMCR0. Normally, freezing the counters
1546 * is done by writing MMCR0 (to set MMCR0[FC]) which
 1547	 * unavoidably writes MMCR0[PMAO] as well.  On POWER8,
1548 * we can also freeze the counters using MMCR2, by writing
1549 * 1s to all the counter freeze condition bits (there are
1550 * 9 bits each for 6 counters).
1551 */
1552 li r3, -1 /* set all freeze bits */
1553 clrrdi r3, r3, 10
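	/* r3 now has 1s in its 54 high-order bits, i.e. all six
	 * 9-bit freeze condition fields of MMCR2. */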
1554 mfspr r10, SPRN_MMCR2
1555 mtspr SPRN_MMCR2, r3
1556 isync
1557END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001558 li r3, 1
1559 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1560 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1561 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
Paul Mackerras89436332012-03-02 01:38:23 +00001562 mfspr r6, SPRN_MMCRA
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001563 /* Clear MMCRA in order to disable SDAR updates */
Paul Mackerras89436332012-03-02 01:38:23 +00001564 li r7, 0
1565 mtspr SPRN_MMCRA, r7
Paul Mackerrasde56a942011-06-29 00:21:34 +00001566 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001567 beq 21f /* if no VPA, save PMU stuff anyway */
1568 lbz r7, LPPACA_PMCINUSE(r8)
1569 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1570 bne 21f
1571 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1572 b 22f
157321: mfspr r5, SPRN_MMCR1
Paul Mackerras14941782013-09-06 13:11:18 +10001574 mfspr r7, SPRN_SIAR
1575 mfspr r8, SPRN_SDAR
Paul Mackerrasde56a942011-06-29 00:21:34 +00001576 std r4, VCPU_MMCR(r9)
1577 std r5, VCPU_MMCR + 8(r9)
1578 std r6, VCPU_MMCR + 16(r9)
Paul Mackerras9bc01a92014-05-26 19:48:40 +10001579BEGIN_FTR_SECTION
1580 std r10, VCPU_MMCR + 24(r9)
1581END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerras14941782013-09-06 13:11:18 +10001582 std r7, VCPU_SIAR(r9)
1583 std r8, VCPU_SDAR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001584 mfspr r3, SPRN_PMC1
1585 mfspr r4, SPRN_PMC2
1586 mfspr r5, SPRN_PMC3
1587 mfspr r6, SPRN_PMC4
1588 mfspr r7, SPRN_PMC5
1589 mfspr r8, SPRN_PMC6
1590 stw r3, VCPU_PMC(r9)
1591 stw r4, VCPU_PMC + 4(r9)
1592 stw r5, VCPU_PMC + 8(r9)
1593 stw r6, VCPU_PMC + 12(r9)
1594 stw r7, VCPU_PMC + 16(r9)
1595 stw r8, VCPU_PMC + 20(r9)
Paul Mackerras9e368f22011-06-29 00:40:08 +00001596BEGIN_FTR_SECTION
Michael Neulingb005255e2014-01-08 21:25:21 +11001597 mfspr r5, SPRN_SIER
1598 mfspr r6, SPRN_SPMC1
1599 mfspr r7, SPRN_SPMC2
1600 mfspr r8, SPRN_MMCRS
Michael Neulingb005255e2014-01-08 21:25:21 +11001601 std r5, VCPU_SIER(r9)
1602 stw r6, VCPU_PMC + 24(r9)
1603 stw r7, VCPU_PMC + 28(r9)
1604 std r8, VCPU_MMCR + 32(r9)
1605 lis r4, 0x8000
1606 mtspr SPRN_MMCRS, r4
1607END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000160822:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001609 /* Clear out SLB */
1610 li r5,0
1611 slbmte r5,r5
1612 slbia
1613 ptesync
1614
Paul Mackerrasde56a942011-06-29 00:21:34 +00001615 /*
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001616 * POWER7/POWER8 guest -> host partition switch code.
Paul Mackerrasde56a942011-06-29 00:21:34 +00001617 * We don't have to lock against tlbies but we do
1618 * have to coordinate the hardware threads.
1619 */
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001620kvmhv_switch_to_host:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001621 /* Secondary threads wait for primary to do partition switch */
Paul Mackerras6af27c82015-03-28 14:21:10 +11001622 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001623 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1624 lbz r3,HSTATE_PTID(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001625 cmpwi r3,0
1626 beq 15f
1627 HMT_LOW
162813: lbz r3,VCORE_IN_GUEST(r5)
1629 cmpwi r3,0
1630 bne 13b
1631 HMT_MEDIUM
1632 b 16f
1633
1634 /* Primary thread waits for all the secondaries to exit guest */
163515: lwz r3,VCORE_ENTRY_EXIT(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001636 rlwinm r0,r3,32-8,0xff
Paul Mackerrasde56a942011-06-29 00:21:34 +00001637 clrldi r3,r3,56
1638 cmpw r3,r0
1639 bne 15b
1640 isync
1641
Paul Mackerrasb4deba52015-07-02 20:38:16 +10001642 /* Did we actually switch to the guest at all? */
1643 lbz r6, VCORE_IN_GUEST(r5)
1644 cmpwi r6, 0
1645 beq 19f
1646
Paul Mackerrasde56a942011-06-29 00:21:34 +00001647 /* Primary thread switches back to host partition */
1648 ld r6,KVM_HOST_SDR1(r4)
1649 lwz r7,KVM_HOST_LPID(r4)
1650 li r8,LPID_RSVD /* switch to reserved LPID */
1651 mtspr SPRN_LPID,r8
1652 ptesync
1653 mtspr SPRN_SDR1,r6 /* switch to partition page table */
1654 mtspr SPRN_LPID,r7
1655 isync
1656
Michael Neulingb005255e2014-01-08 21:25:21 +11001657BEGIN_FTR_SECTION
1658 /* DPDES is shared between threads */
1659 mfspr r7, SPRN_DPDES
1660 std r7, VCORE_DPDES(r5)
1661 /* clear DPDES so we don't get guest doorbells in the host */
1662 li r8, 0
1663 mtspr SPRN_DPDES, r8
1664END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1665
Paul Mackerrasde56a942011-06-29 00:21:34 +00001666 /* Subtract timebase offset from timebase */
1667 ld r8,VCORE_TB_OFFSET(r5)
1668 cmpdi r8,0
1669 beq 17f
Paul Mackerrasc5fb80d2014-03-25 10:47:07 +11001670 mftb r6 /* current guest timebase */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001671 subf r8,r8,r6
1672 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
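	/* TBU40 writes only the upper 40 bits of the timebase, so check
	 * below whether the low 24 bits wrapped while we updated it. */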
1673 mftb r7 /* check if lower 24 bits overflowed */
1674 clrldi r6,r6,40
1675 clrldi r7,r7,40
1676 cmpld r7,r6
1677 bge 17f
1678 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1679 mtspr SPRN_TBU40,r8
1680
1681 /* Reset PCR */
168217: ld r0, VCORE_PCR(r5)
1683 cmpdi r0, 0
1684 beq 18f
1685 li r0, 0
1686 mtspr SPRN_PCR, r0
168718:
1688 /* Signal secondary CPUs to continue */
1689 stb r0,VCORE_IN_GUEST(r5)
Paul Mackerrasb4deba52015-07-02 20:38:16 +1000169019: lis r8,0x7fff /* MAX_INT@h */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001691 mtspr SPRN_HDEC,r8
1692
169316: ld r8,KVM_HOST_LPCR(r4)
1694 mtspr SPRN_LPCR,r8
1695 isync
Paul Mackerrasde56a942011-06-29 00:21:34 +00001696
1697 /* load host SLB entries */
Paul Mackerrasc17b98c2014-12-03 13:30:38 +11001698 ld r8,PACA_SLBSHADOWPTR(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001699
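	/* The bolted entries come from the (big-endian) SLB shadow buffer;
	 * only slots with the ESID valid bit set are reinstalled. */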
1700 .rept SLB_NUM_BOLTED
Alexander Graf0865a582014-06-11 10:36:17 +02001701 li r3, SLBSHADOW_SAVEAREA
1702 LDX_BE r5, r8, r3
1703 addi r3, r3, 8
1704 LDX_BE r6, r8, r3
Paul Mackerrasde56a942011-06-29 00:21:34 +00001705 andis. r7,r5,SLB_ESID_V@h
1706 beq 1f
1707 slbmte r6,r5
17081: addi r8,r8,16
1709 .endr
1710
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11001711#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
1712 /* Finish timing, if we have a vcpu */
1713 ld r4, HSTATE_KVM_VCPU(r13)
1714 cmpdi r4, 0
1715 li r3, 0
1716 beq 2f
1717 bl kvmhv_accumulate_time
17182:
1719#endif
Paul Mackerrasde56a942011-06-29 00:21:34 +00001720 /* Unset guest mode */
1721 li r0, KVM_GUEST_MODE_NONE
1722 stb r0, HSTATE_IN_GUEST(r13)
1723
Paul Mackerras218309b2013-09-06 13:23:44 +10001724 ld r0, 112+PPC_LR_STKOFF(r1)
1725 addi r1, r1, 112
1726 mtlr r0
1727 blr
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001728
Paul Mackerras697d3892011-12-12 12:36:37 +00001729/*
1730 * Check whether an HDSI is an HPTE not found fault or something else.
1731 * If it is an HPTE not found fault that is due to the guest accessing
1732 * a page that they have mapped but which we have paged out, then
1733 * we continue on with the guest exit path. In all other cases,
1734 * reflect the HDSI to the guest as a DSI.
1735 */
1736kvmppc_hdsi:
1737 mfspr r4, SPRN_HDAR
1738 mfspr r6, SPRN_HDSISR
Paul Mackerras4cf302b2011-12-12 12:38:51 +00001739 /* HPTE not found fault or protection fault? */
1740 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
Paul Mackerras697d3892011-12-12 12:36:37 +00001741 beq 1f /* if not, send it to the guest */
1742 andi. r0, r11, MSR_DR /* data relocation enabled? */
1743 beq 3f
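	/* DR is on: look up the SLB entry for the faulting address;
	 * the low 28 bits are irrelevant to the segment lookup. */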
1744 clrrdi r0, r4, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00001745 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerras697d3892011-12-12 12:36:37 +00001746 bne 1f /* if no SLB entry found */
17474: std r4, VCPU_FAULT_DAR(r9)
1748 stw r6, VCPU_FAULT_DSISR(r9)
1749
1750 /* Search the hash table. */
1751 mr r3, r9 /* vcpu pointer */
Paul Mackerras342d3db2011-12-12 12:38:05 +00001752 li r7, 1 /* data fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001753 bl kvmppc_hpte_hv_fault
Paul Mackerras697d3892011-12-12 12:36:37 +00001754 ld r9, HSTATE_KVM_VCPU(r13)
1755 ld r10, VCPU_PC(r9)
1756 ld r11, VCPU_MSR(r9)
1757 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1758 cmpdi r3, 0 /* retry the instruction */
1759 beq 6f
1760 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001761 beq guest_exit_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00001762 cmpdi r3, -2 /* MMIO emulation; need instr word */
1763 beq 2f
1764
1765 /* Synthesize a DSI for the guest */
1766 ld r4, VCPU_FAULT_DAR(r9)
1767 mr r6, r3
17681: mtspr SPRN_DAR, r4
1769 mtspr SPRN_DSISR, r6
1770 mtspr SPRN_SRR0, r10
1771 mtspr SPRN_SRR1, r11
1772 li r10, BOOK3S_INTERRUPT_DATA_STORAGE
Michael Neulinge4e38122014-03-25 10:47:02 +11001773 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001774fast_interrupt_c_return:
Paul Mackerras697d3892011-12-12 12:36:37 +000017756: ld r7, VCPU_CTR(r9)
Sam bobroffc63517c2015-05-27 09:56:57 +10001776 ld r8, VCPU_XER(r9)
Paul Mackerras697d3892011-12-12 12:36:37 +00001777 mtctr r7
1778 mtxer r8
1779 mr r4, r9
1780 b fast_guest_return
1781
17823: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
1783 ld r5, KVM_VRMA_SLB_V(r5)
1784 b 4b
1785
1786 /* If this is for emulated MMIO, load the instruction word */
17872: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
1788
1789 /* Set guest mode to 'jump over instruction' so if lwz faults
1790 * we'll just continue at the next IP. */
1791 li r0, KVM_GUEST_MODE_SKIP
1792 stb r0, HSTATE_IN_GUEST(r13)
1793
1794 /* Do the access with MSR:DR enabled */
1795 mfmsr r3
1796 ori r4, r3, MSR_DR /* Enable paging for data */
1797 mtmsrd r4
1798 lwz r8, 0(r10)
1799 mtmsrd r3
1800
1801 /* Store the result */
1802 stw r8, VCPU_LAST_INST(r9)
1803
1804 /* Unset guest mode. */
Paul Mackerras44a3add2013-10-04 21:45:04 +10001805 li r0, KVM_GUEST_MODE_HOST_HV
Paul Mackerras697d3892011-12-12 12:36:37 +00001806 stb r0, HSTATE_IN_GUEST(r13)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001807 b guest_exit_cont
Paul Mackerrasde56a942011-06-29 00:21:34 +00001808
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001809/*
Paul Mackerras342d3db2011-12-12 12:38:05 +00001810 * Similarly for an HISI, reflect it to the guest as an ISI unless
1811 * it is an HPTE not found fault for a page that we have paged out.
1812 */
1813kvmppc_hisi:
1814 andis. r0, r11, SRR1_ISI_NOPT@h
1815 beq 1f
1816 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
1817 beq 3f
1818 clrrdi r0, r10, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00001819 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerras342d3db2011-12-12 12:38:05 +00001820 bne 1f /* if no SLB entry found */
18214:
1822 /* Search the hash table. */
1823 mr r3, r9 /* vcpu pointer */
1824 mr r4, r10
1825 mr r6, r11
1826 li r7, 0 /* instruction fault */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11001827 bl kvmppc_hpte_hv_fault
Paul Mackerras342d3db2011-12-12 12:38:05 +00001828 ld r9, HSTATE_KVM_VCPU(r13)
1829 ld r10, VCPU_PC(r9)
1830 ld r11, VCPU_MSR(r9)
1831 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1832 cmpdi r3, 0 /* retry the instruction */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001833 beq fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00001834 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001835 beq guest_exit_cont
Paul Mackerras342d3db2011-12-12 12:38:05 +00001836
1837 /* Synthesize an ISI for the guest */
1838 mr r11, r3
18391: mtspr SPRN_SRR0, r10
1840 mtspr SPRN_SRR1, r11
1841 li r10, BOOK3S_INTERRUPT_INST_STORAGE
Michael Neulinge4e38122014-03-25 10:47:02 +11001842 bl kvmppc_msr_interrupt
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001843 b fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00001844
18453: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
1846 ld r5, KVM_VRMA_SLB_V(r6)
1847 b 4b
1848
1849/*
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001850 * Try to handle an hcall in real mode.
1851 * Returns to the guest if we handle it, or continues on up to
1852 * the kernel if we can't (i.e. if we don't have a handler for
1853 * it, or if the handler returns H_TOO_HARD).
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11001854 *
1855 * r5 - r8 contain hcall args,
1856 * r9 = vcpu, r10 = pc, r11 = msr, r12 = trap, r13 = paca
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001857 */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001858hcall_try_real_mode:
Michael Neulingc75df6f2012-06-25 13:33:10 +00001859 ld r3,VCPU_GPR(R3)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001860 andi. r0,r11,MSR_PR
Liu Ping Fan27025a62013-11-19 14:12:48 +08001861 /* sc 1 from userspace - reflect to guest syscall */
1862 bne sc_1_fast_return
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001863 clrrdi r3,r3,2
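	/* Hcall numbers are multiples of 4 and the dispatch table below
	 * has 4-byte entries, so r3 doubles as the byte offset into it. */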
1864 cmpldi r3,hcall_real_table_end - hcall_real_table
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001865 bge guest_exit_cont
Paul Mackerras699a0ea2014-06-02 11:02:59 +10001866 /* See if this hcall is enabled for in-kernel handling */
1867 ld r4, VCPU_KVM(r9)
1868 srdi r0, r3, 8 /* r0 = (r3 / 4) >> 6 */
1869 sldi r0, r0, 3 /* index into kvm->arch.enabled_hcalls[] */
1870 add r4, r4, r0
1871 ld r0, KVM_ENABLED_HCALLS(r4)
1872 rlwinm r4, r3, 32-2, 0x3f /* r4 = (r3 / 4) & 0x3f */
1873 srd r0, r0, r4
1874 andi. r0, r0, 1
1875 beq guest_exit_cont
1876 /* Get pointer to handler, if any, and call it */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001877 LOAD_REG_ADDR(r4, hcall_real_table)
Paul Mackerras4baa1d82013-07-08 20:09:53 +10001878 lwax r3,r3,r4
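	/* Each table entry is a 32-bit offset from hcall_real_table;
	 * zero means there is no real-mode handler for this hcall. */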
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001879 cmpwi r3,0
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001880 beq guest_exit_cont
Anton Blanchard05a308c2014-06-12 18:16:10 +10001881 add r12,r3,r4
1882 mtctr r12
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001883 mr r3,r9 /* get vcpu pointer */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001884 ld r4,VCPU_GPR(R4)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001885 bctrl
1886 cmpdi r3,H_TOO_HARD
1887 beq hcall_real_fallback
1888 ld r4,HSTATE_KVM_VCPU(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001889 std r3,VCPU_GPR(R3)(r4)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001890 ld r10,VCPU_PC(r4)
1891 ld r11,VCPU_MSR(r4)
1892 b fast_guest_return
1893
Liu Ping Fan27025a62013-11-19 14:12:48 +08001894sc_1_fast_return:
1895 mtspr SPRN_SRR0,r10
1896 mtspr SPRN_SRR1,r11
1897 li r10, BOOK3S_INTERRUPT_SYSCALL
Michael Neulinge4e38122014-03-25 10:47:02 +11001898 bl kvmppc_msr_interrupt
Liu Ping Fan27025a62013-11-19 14:12:48 +08001899 mr r4,r9
1900 b fast_guest_return
1901
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001902	/* We've attempted a real-mode hcall, but the handler has punted it
 1903	 * back to userspace.  We need to restore some clobbered volatiles
 1904	 * before resuming the pass-it-to-qemu path */
1905hcall_real_fallback:
1906 li r12,BOOK3S_INTERRUPT_SYSCALL
1907 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001908
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001909 b guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001910
1911 .globl hcall_real_table
1912hcall_real_table:
1913 .long 0 /* 0 - unused */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001914 .long DOTSYM(kvmppc_h_remove) - hcall_real_table
1915 .long DOTSYM(kvmppc_h_enter) - hcall_real_table
1916 .long DOTSYM(kvmppc_h_read) - hcall_real_table
Paul Mackerrascdeee512015-06-24 21:18:07 +10001917 .long DOTSYM(kvmppc_h_clear_mod) - hcall_real_table
1918 .long DOTSYM(kvmppc_h_clear_ref) - hcall_real_table
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001919 .long DOTSYM(kvmppc_h_protect) - hcall_real_table
1920 .long DOTSYM(kvmppc_h_get_tce) - hcall_real_table
1921 .long DOTSYM(kvmppc_h_put_tce) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001922 .long 0 /* 0x24 - H_SET_SPRG0 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001923 .long DOTSYM(kvmppc_h_set_dabr) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001924 .long 0 /* 0x2c */
1925 .long 0 /* 0x30 */
1926 .long 0 /* 0x34 */
1927 .long 0 /* 0x38 */
1928 .long 0 /* 0x3c */
1929 .long 0 /* 0x40 */
1930 .long 0 /* 0x44 */
1931 .long 0 /* 0x48 */
1932 .long 0 /* 0x4c */
1933 .long 0 /* 0x50 */
1934 .long 0 /* 0x54 */
1935 .long 0 /* 0x58 */
1936 .long 0 /* 0x5c */
1937 .long 0 /* 0x60 */
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00001938#ifdef CONFIG_KVM_XICS
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001939 .long DOTSYM(kvmppc_rm_h_eoi) - hcall_real_table
1940 .long DOTSYM(kvmppc_rm_h_cppr) - hcall_real_table
1941 .long DOTSYM(kvmppc_rm_h_ipi) - hcall_real_table
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00001942 .long 0 /* 0x70 - H_IPOLL */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001943 .long DOTSYM(kvmppc_rm_h_xirr) - hcall_real_table
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00001944#else
1945 .long 0 /* 0x64 - H_EOI */
1946 .long 0 /* 0x68 - H_CPPR */
1947 .long 0 /* 0x6c - H_IPI */
1948 .long 0 /* 0x70 - H_IPOLL */
1949 .long 0 /* 0x74 - H_XIRR */
1950#endif
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001951 .long 0 /* 0x78 */
1952 .long 0 /* 0x7c */
1953 .long 0 /* 0x80 */
1954 .long 0 /* 0x84 */
1955 .long 0 /* 0x88 */
1956 .long 0 /* 0x8c */
1957 .long 0 /* 0x90 */
1958 .long 0 /* 0x94 */
1959 .long 0 /* 0x98 */
1960 .long 0 /* 0x9c */
1961 .long 0 /* 0xa0 */
1962 .long 0 /* 0xa4 */
1963 .long 0 /* 0xa8 */
1964 .long 0 /* 0xac */
1965 .long 0 /* 0xb0 */
1966 .long 0 /* 0xb4 */
1967 .long 0 /* 0xb8 */
1968 .long 0 /* 0xbc */
1969 .long 0 /* 0xc0 */
1970 .long 0 /* 0xc4 */
1971 .long 0 /* 0xc8 */
1972 .long 0 /* 0xcc */
1973 .long 0 /* 0xd0 */
1974 .long 0 /* 0xd4 */
1975 .long 0 /* 0xd8 */
1976 .long 0 /* 0xdc */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001977 .long DOTSYM(kvmppc_h_cede) - hcall_real_table
Sam Bobroff90fd09f2014-12-03 13:30:40 +11001978 .long DOTSYM(kvmppc_rm_h_confer) - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001979 .long 0 /* 0xe8 */
1980 .long 0 /* 0xec */
1981 .long 0 /* 0xf0 */
1982 .long 0 /* 0xf4 */
1983 .long 0 /* 0xf8 */
1984 .long 0 /* 0xfc */
1985 .long 0 /* 0x100 */
1986 .long 0 /* 0x104 */
1987 .long 0 /* 0x108 */
1988 .long 0 /* 0x10c */
1989 .long 0 /* 0x110 */
1990 .long 0 /* 0x114 */
1991 .long 0 /* 0x118 */
1992 .long 0 /* 0x11c */
1993 .long 0 /* 0x120 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001994 .long DOTSYM(kvmppc_h_bulk_remove) - hcall_real_table
Paul Mackerras8563bf52014-01-08 21:25:29 +11001995 .long 0 /* 0x128 */
1996 .long 0 /* 0x12c */
1997 .long 0 /* 0x130 */
Anton Blanchardc1fb0192014-02-04 16:07:01 +11001998 .long DOTSYM(kvmppc_h_set_xdabr) - hcall_real_table
Michael Ellermane928e9c2015-03-20 20:39:41 +11001999 .long 0 /* 0x138 */
2000 .long 0 /* 0x13c */
2001 .long 0 /* 0x140 */
2002 .long 0 /* 0x144 */
2003 .long 0 /* 0x148 */
2004 .long 0 /* 0x14c */
2005 .long 0 /* 0x150 */
2006 .long 0 /* 0x154 */
2007 .long 0 /* 0x158 */
2008 .long 0 /* 0x15c */
2009 .long 0 /* 0x160 */
2010 .long 0 /* 0x164 */
2011 .long 0 /* 0x168 */
2012 .long 0 /* 0x16c */
2013 .long 0 /* 0x170 */
2014 .long 0 /* 0x174 */
2015 .long 0 /* 0x178 */
2016 .long 0 /* 0x17c */
2017 .long 0 /* 0x180 */
2018 .long 0 /* 0x184 */
2019 .long 0 /* 0x188 */
2020 .long 0 /* 0x18c */
2021 .long 0 /* 0x190 */
2022 .long 0 /* 0x194 */
2023 .long 0 /* 0x198 */
2024 .long 0 /* 0x19c */
2025 .long 0 /* 0x1a0 */
2026 .long 0 /* 0x1a4 */
2027 .long 0 /* 0x1a8 */
2028 .long 0 /* 0x1ac */
2029 .long 0 /* 0x1b0 */
2030 .long 0 /* 0x1b4 */
2031 .long 0 /* 0x1b8 */
2032 .long 0 /* 0x1bc */
2033 .long 0 /* 0x1c0 */
2034 .long 0 /* 0x1c4 */
2035 .long 0 /* 0x1c8 */
2036 .long 0 /* 0x1cc */
2037 .long 0 /* 0x1d0 */
2038 .long 0 /* 0x1d4 */
2039 .long 0 /* 0x1d8 */
2040 .long 0 /* 0x1dc */
2041 .long 0 /* 0x1e0 */
2042 .long 0 /* 0x1e4 */
2043 .long 0 /* 0x1e8 */
2044 .long 0 /* 0x1ec */
2045 .long 0 /* 0x1f0 */
2046 .long 0 /* 0x1f4 */
2047 .long 0 /* 0x1f8 */
2048 .long 0 /* 0x1fc */
2049 .long 0 /* 0x200 */
2050 .long 0 /* 0x204 */
2051 .long 0 /* 0x208 */
2052 .long 0 /* 0x20c */
2053 .long 0 /* 0x210 */
2054 .long 0 /* 0x214 */
2055 .long 0 /* 0x218 */
2056 .long 0 /* 0x21c */
2057 .long 0 /* 0x220 */
2058 .long 0 /* 0x224 */
2059 .long 0 /* 0x228 */
2060 .long 0 /* 0x22c */
2061 .long 0 /* 0x230 */
2062 .long 0 /* 0x234 */
2063 .long 0 /* 0x238 */
2064 .long 0 /* 0x23c */
2065 .long 0 /* 0x240 */
2066 .long 0 /* 0x244 */
2067 .long 0 /* 0x248 */
2068 .long 0 /* 0x24c */
2069 .long 0 /* 0x250 */
2070 .long 0 /* 0x254 */
2071 .long 0 /* 0x258 */
2072 .long 0 /* 0x25c */
2073 .long 0 /* 0x260 */
2074 .long 0 /* 0x264 */
2075 .long 0 /* 0x268 */
2076 .long 0 /* 0x26c */
2077 .long 0 /* 0x270 */
2078 .long 0 /* 0x274 */
2079 .long 0 /* 0x278 */
2080 .long 0 /* 0x27c */
2081 .long 0 /* 0x280 */
2082 .long 0 /* 0x284 */
2083 .long 0 /* 0x288 */
2084 .long 0 /* 0x28c */
2085 .long 0 /* 0x290 */
2086 .long 0 /* 0x294 */
2087 .long 0 /* 0x298 */
2088 .long 0 /* 0x29c */
2089 .long 0 /* 0x2a0 */
2090 .long 0 /* 0x2a4 */
2091 .long 0 /* 0x2a8 */
2092 .long 0 /* 0x2ac */
2093 .long 0 /* 0x2b0 */
2094 .long 0 /* 0x2b4 */
2095 .long 0 /* 0x2b8 */
2096 .long 0 /* 0x2bc */
2097 .long 0 /* 0x2c0 */
2098 .long 0 /* 0x2c4 */
2099 .long 0 /* 0x2c8 */
2100 .long 0 /* 0x2cc */
2101 .long 0 /* 0x2d0 */
2102 .long 0 /* 0x2d4 */
2103 .long 0 /* 0x2d8 */
2104 .long 0 /* 0x2dc */
2105 .long 0 /* 0x2e0 */
2106 .long 0 /* 0x2e4 */
2107 .long 0 /* 0x2e8 */
2108 .long 0 /* 0x2ec */
2109 .long 0 /* 0x2f0 */
2110 .long 0 /* 0x2f4 */
2111 .long 0 /* 0x2f8 */
2112 .long 0 /* 0x2fc */
2113 .long DOTSYM(kvmppc_h_random) - hcall_real_table
Paul Mackerrasae2113a2014-06-02 11:03:00 +10002114 .globl hcall_real_table_end
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002115hcall_real_table_end:
2116
Paul Mackerras8563bf52014-01-08 21:25:29 +11002117_GLOBAL(kvmppc_h_set_xdabr)
2118 andi. r0, r5, DABRX_USER | DABRX_KERNEL
2119 beq 6f
2120 li r0, DABRX_USER | DABRX_KERNEL | DABRX_BTI
2121 andc. r0, r5, r0
2122 beq 3f
21236: li r3, H_PARAMETER
2124 blr
2125
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002126_GLOBAL(kvmppc_h_set_dabr)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002127 li r5, DABRX_USER | DABRX_KERNEL
21283:
Michael Neulingeee7ff92014-01-08 21:25:19 +11002129BEGIN_FTR_SECTION
2130 b 2f
2131END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002132 std r4,VCPU_DABR(r3)
Paul Mackerras8563bf52014-01-08 21:25:29 +11002133 stw r5, VCPU_DABRX(r3)
2134 mtspr SPRN_DABRX, r5
Paul Mackerras89436332012-03-02 01:38:23 +00002135 /* Work around P7 bug where DABR can get corrupted on mtspr */
21361: mtspr SPRN_DABR,r4
2137 mfspr r5, SPRN_DABR
2138 cmpd r4, r5
2139 bne 1b
2140 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00002141 li r3,0
2142 blr
2143
Paul Mackerras8563bf52014-01-08 21:25:29 +11002144 /* Emulate H_SET_DABR/X on P8 for the sake of compat mode guests */
21452: rlwimi r5, r4, 5, DAWRX_DR | DAWRX_DW
2146 rlwimi r5, r4, 1, DAWRX_WT
2147 clrrdi r4, r4, 3
2148 std r4, VCPU_DAWR(r3)
2149 std r5, VCPU_DAWRX(r3)
2150 mtspr SPRN_DAWR, r4
2151 mtspr SPRN_DAWRX, r5
2152 li r3, 0
Paul Mackerrasde56a942011-06-29 00:21:34 +00002153 blr
2154
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002155_GLOBAL(kvmppc_h_cede) /* r3 = vcpu pointer, r11 = msr, r13 = paca */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002156 ori r11,r11,MSR_EE
2157 std r11,VCPU_MSR(r3)
2158 li r0,1
2159 stb r0,VCPU_CEDED(r3)
2160 sync /* order setting ceded vs. testing prodded */
2161 lbz r5,VCPU_PRODDED(r3)
2162 cmpwi r5,0
Paul Mackerras04f995a2012-08-06 00:03:28 +00002163 bne kvm_cede_prodded
Paul Mackerras6af27c82015-03-28 14:21:10 +11002164 li r12,0 /* set trap to 0 to say hcall is handled */
2165 stw r12,VCPU_TRAP(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002166 li r0,H_SUCCESS
Michael Neulingc75df6f2012-06-25 13:33:10 +00002167 std r0,VCPU_GPR(R3)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002168
2169 /*
2170 * Set our bit in the bitmask of napping threads unless all the
2171 * other threads are already napping, in which case we send this
2172 * up to the host.
2173 */
2174 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002175 lbz r6,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002176 lwz r8,VCORE_ENTRY_EXIT(r5)
2177 clrldi r8,r8,56
2178 li r0,1
2179 sld r0,r0,r6
2180 addi r6,r5,VCORE_NAPPING_THREADS
218131: lwarx r4,0,r6
2182 or r4,r4,r0
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002183 cmpw r4,r8
2184 beq kvm_cede_exit
Paul Mackerras19ccb762011-07-23 17:42:46 +10002185 stwcx. r4,0,r6
2186 bne 31b
Paul Mackerras7d6c40d2015-03-28 14:21:09 +11002187 /* order napping_threads update vs testing entry_exit_map */
Paul Mackerrasf019b7a2013-11-16 17:46:03 +11002188 isync
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11002189 li r0,NAPPING_CEDE
Paul Mackerras19ccb762011-07-23 17:42:46 +10002190 stb r0,HSTATE_NAPPING(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002191 lwz r7,VCORE_ENTRY_EXIT(r5)
2192 cmpwi r7,0x100
2193 bge 33f /* another thread already exiting */
2194
2195/*
2196 * Although not specifically required by the architecture, POWER7
2197 * preserves the following registers in nap mode, even if an SMT mode
2198 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
2199 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
2200 */
2201 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002202 std r14, VCPU_GPR(R14)(r3)
2203 std r15, VCPU_GPR(R15)(r3)
2204 std r16, VCPU_GPR(R16)(r3)
2205 std r17, VCPU_GPR(R17)(r3)
2206 std r18, VCPU_GPR(R18)(r3)
2207 std r19, VCPU_GPR(R19)(r3)
2208 std r20, VCPU_GPR(R20)(r3)
2209 std r21, VCPU_GPR(R21)(r3)
2210 std r22, VCPU_GPR(R22)(r3)
2211 std r23, VCPU_GPR(R23)(r3)
2212 std r24, VCPU_GPR(R24)(r3)
2213 std r25, VCPU_GPR(R25)(r3)
2214 std r26, VCPU_GPR(R26)(r3)
2215 std r27, VCPU_GPR(R27)(r3)
2216 std r28, VCPU_GPR(R28)(r3)
2217 std r29, VCPU_GPR(R29)(r3)
2218 std r30, VCPU_GPR(R30)(r3)
2219 std r31, VCPU_GPR(R31)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002220
2221 /* save FP state */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002222 bl kvmppc_save_fp
Paul Mackerras19ccb762011-07-23 17:42:46 +10002223
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002224 /*
2225 * Set DEC to the smaller of DEC and HDEC, so that we wake
2226 * no later than the end of our timeslice (HDEC interrupts
2227 * don't wake us from nap).
2228 */
2229 mfspr r3, SPRN_DEC
2230 mfspr r4, SPRN_HDEC
2231 mftb r5
2232 cmpw r3, r4
2233 ble 67f
2234 mtspr SPRN_DEC, r4
223567:
2236 /* save expiry time of guest decrementer */
2237 extsw r3, r3
2238 add r3, r3, r5
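	/* r3 = guest timebase value at which the guest decrementer
	 * would reach zero */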
2239 ld r4, HSTATE_KVM_VCPU(r13)
2240 ld r5, HSTATE_KVM_VCORE(r13)
2241 ld r6, VCORE_TB_OFFSET(r5)
2242 subf r3, r6, r3 /* convert to host TB value */
2243 std r3, VCPU_DEC_EXPIRES(r4)
2244
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002245#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2246 ld r4, HSTATE_KVM_VCPU(r13)
2247 addi r3, r4, VCPU_TB_CEDE
2248 bl kvmhv_accumulate_time
2249#endif
2250
Paul Mackerrasccc07772015-03-28 14:21:07 +11002251 lis r3, LPCR_PECEDP@h /* Do wake on privileged doorbell */
2252
Paul Mackerras19ccb762011-07-23 17:42:46 +10002253 /*
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002254	 * Take a nap until a decrementer or external or doorbell interrupt
Paul Mackerrasccc07772015-03-28 14:21:07 +11002255 * occurs, with PECE1 and PECE0 set in LPCR.
Paul Mackerras66feed62015-03-28 14:21:12 +11002256 * On POWER8, set PECEDH, and if we are ceding, also set PECEDP.
Paul Mackerrasccc07772015-03-28 14:21:07 +11002257 * Also clear the runlatch bit before napping.
Paul Mackerras19ccb762011-07-23 17:42:46 +10002258 */
Paul Mackerras56548fc2014-12-03 14:48:40 +11002259kvm_do_nap:
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002260 mfspr r0, SPRN_CTRLF
2261 clrrdi r0, r0, 1
2262 mtspr SPRN_CTRLT, r0
Preeti U Murthy582b9102014-04-11 16:02:08 +05302263
Paul Mackerrasf0888f72012-02-03 00:54:17 +00002264 li r0,1
2265 stb r0,HSTATE_HWTHREAD_REQ(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002266 mfspr r5,SPRN_LPCR
2267 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002268BEGIN_FTR_SECTION
Paul Mackerras66feed62015-03-28 14:21:12 +11002269 ori r5, r5, LPCR_PECEDH
Paul Mackerrasccc07772015-03-28 14:21:07 +11002270 rlwimi r5, r3, 0, LPCR_PECEDP
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002271END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002272 mtspr SPRN_LPCR,r5
2273 isync
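	/* Dummy store, ptesync and dependent load below: make sure all
	 * earlier stores have been performed before the thread naps. */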
2274 li r0, 0
2275 std r0, HSTATE_SCRATCH0(r13)
2276 ptesync
2277 ld r0, HSTATE_SCRATCH0(r13)
22781: cmpd r0, r0
2279 bne 1b
2280 nap
2281 b .
2282
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100228333: mr r4, r3
2284 li r3, 0
2285 li r12, 0
2286 b 34f
2287
Paul Mackerras19ccb762011-07-23 17:42:46 +10002288kvm_end_cede:
Paul Mackerras4619ac82013-04-17 20:31:41 +00002289 /* get vcpu pointer */
2290 ld r4, HSTATE_KVM_VCPU(r13)
2291
Paul Mackerras19ccb762011-07-23 17:42:46 +10002292 /* Woken by external or decrementer interrupt */
2293 ld r1, HSTATE_HOST_R1(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002294
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002295#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2296 addi r3, r4, VCPU_TB_RMINTR
2297 bl kvmhv_accumulate_time
2298#endif
2299
Paul Mackerras19ccb762011-07-23 17:42:46 +10002300 /* load up FP state */
2301 bl kvmppc_load_fp
2302
Paul Mackerrasfd6d53b2015-03-28 14:21:08 +11002303 /* Restore guest decrementer */
2304 ld r3, VCPU_DEC_EXPIRES(r4)
2305 ld r5, HSTATE_KVM_VCORE(r13)
2306 ld r6, VCORE_TB_OFFSET(r5)
2307 add r3, r3, r6 /* convert host TB to guest TB value */
2308 mftb r7
2309 subf r3, r7, r3
2310 mtspr SPRN_DEC, r3
2311
Paul Mackerras19ccb762011-07-23 17:42:46 +10002312 /* Load NV GPRS */
Michael Neulingc75df6f2012-06-25 13:33:10 +00002313 ld r14, VCPU_GPR(R14)(r4)
2314 ld r15, VCPU_GPR(R15)(r4)
2315 ld r16, VCPU_GPR(R16)(r4)
2316 ld r17, VCPU_GPR(R17)(r4)
2317 ld r18, VCPU_GPR(R18)(r4)
2318 ld r19, VCPU_GPR(R19)(r4)
2319 ld r20, VCPU_GPR(R20)(r4)
2320 ld r21, VCPU_GPR(R21)(r4)
2321 ld r22, VCPU_GPR(R22)(r4)
2322 ld r23, VCPU_GPR(R23)(r4)
2323 ld r24, VCPU_GPR(R24)(r4)
2324 ld r25, VCPU_GPR(R25)(r4)
2325 ld r26, VCPU_GPR(R26)(r4)
2326 ld r27, VCPU_GPR(R27)(r4)
2327 ld r28, VCPU_GPR(R28)(r4)
2328 ld r29, VCPU_GPR(R29)(r4)
2329 ld r30, VCPU_GPR(R30)(r4)
2330 ld r31, VCPU_GPR(R31)(r4)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002331
2332 /* Check the wake reason in SRR1 to see why we got here */
2333 bl kvmppc_check_wake_reason
Paul Mackerras19ccb762011-07-23 17:42:46 +10002334
2335 /* clear our bit in vcore->napping_threads */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +1100233634: ld r5,HSTATE_KVM_VCORE(r13)
2337 lbz r7,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10002338 li r0,1
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002339 sld r0,r0,r7
Paul Mackerras19ccb762011-07-23 17:42:46 +10002340 addi r6,r5,VCORE_NAPPING_THREADS
234132: lwarx r7,0,r6
2342 andc r7,r7,r0
2343 stwcx. r7,0,r6
2344 bne 32b
2345 li r0,0
2346 stb r0,HSTATE_NAPPING(r13)
2347
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002348 /* See if the wake reason means we need to exit */
2349 stw r12, VCPU_TRAP(r4)
Paul Mackerras4619ac82013-04-17 20:31:41 +00002350 mr r9, r4
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002351 cmpdi r3, 0
2352 bgt guest_exit_cont
Paul Mackerras4619ac82013-04-17 20:31:41 +00002353
Paul Mackerras19ccb762011-07-23 17:42:46 +10002354 /* see if any other thread is already exiting */
2355 lwz r0,VCORE_ENTRY_EXIT(r5)
2356 cmpwi r0,0x100
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002357 bge guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002358
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002359 b kvmppc_cede_reentry /* if not go back to guest */
Paul Mackerras19ccb762011-07-23 17:42:46 +10002360
2361 /* cede when already previously prodded case */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002362kvm_cede_prodded:
2363 li r0,0
Paul Mackerras19ccb762011-07-23 17:42:46 +10002364 stb r0,VCPU_PRODDED(r3)
2365 sync /* order testing prodded vs. clearing ceded */
2366 stb r0,VCPU_CEDED(r3)
2367 li r3,H_SUCCESS
2368 blr
2369
2370 /* we've ceded but we want to give control to the host */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002371kvm_cede_exit:
Paul Mackerras6af27c82015-03-28 14:21:10 +11002372 ld r9, HSTATE_KVM_VCPU(r13)
2373 b guest_exit_cont
Paul Mackerras19ccb762011-07-23 17:42:46 +10002374
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002375 /* Try to handle a machine check in real mode */
2376machine_check_realmode:
2377 mr r3, r9 /* get vcpu pointer */
Anton Blanchardb1576fe2014-02-04 16:04:35 +11002378 bl kvmppc_realmode_machine_check
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002379 nop
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302380 cmpdi r3, 0 /* Did we handle MCE ? */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002381 ld r9, HSTATE_KVM_VCPU(r13)
2382 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +05302383 /*
 2384	 * Deliver unhandled/fatal (e.g. UE) MCE errors to the guest through a
 2385	 * machine check interrupt (set HSRR0 to 0x200).  For handled
 2386	 * (non-fatal) errors, just go back to guest execution with the current
 2387	 * HSRR0 instead of exiting the guest.  This approach injects a machine
 2388	 * check into the guest for fatal errors, causing the guest to crash.
 2389	 *
 2390	 * The old code used to return to the host for unhandled errors, which
 2391	 * caused the guest to hang with soft lockups inside the guest and
 2392	 * made it difficult to recover the guest instance.
2393 */
2394 ld r10, VCPU_PC(r9)
2395 ld r11, VCPU_MSR(r9)
2396 bne 2f /* Continue guest execution. */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002397 /* If not, deliver a machine check. SRR0/1 are already set */
2398 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
Paul Mackerras000a25d2014-05-26 19:48:41 +10002399 ld r11, VCPU_MSR(r9)
Michael Neulinge4e38122014-03-25 10:47:02 +11002400 bl kvmppc_msr_interrupt
Mahesh Salgaonkar74845bc2014-06-11 14:18:21 +053024012: b fast_interrupt_c_return
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002402
Paul Mackerrasde56a942011-06-29 00:21:34 +00002403/*
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002404 * Check the reason we woke from nap, and take appropriate action.
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002405 * Returns (in r3):
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002406 * 0 if nothing needs to be done
2407 * 1 if something happened that needs to be handled by the host
Paul Mackerras66feed62015-03-28 14:21:12 +11002408 * -1 if there was a guest wakeup (IPI or msgsnd)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002409 *
2410 * Also sets r12 to the interrupt vector for any interrupt that needs
2411 * to be handled now by the host (0x500 for external interrupt), or zero.
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002412 * Modifies r0, r6, r7, r8.
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002413 */
2414kvmppc_check_wake_reason:
2415 mfspr r6, SPRN_SRR1
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002416BEGIN_FTR_SECTION
2417 rlwinm r6, r6, 45-31, 0xf /* extract wake reason field (P8) */
2418FTR_SECTION_ELSE
2419 rlwinm r6, r6, 45-31, 0xe /* P7 wake reason field is 3 bits */
2420ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
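	/* r6 = wake reason from SRR1, decoded below: 8 = external interrupt,
	 * 6 = decrementer, 5 = privileged doorbell (P8), 3 = hypervisor
	 * doorbell (P8) */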
2421 cmpwi r6, 8 /* was it an external interrupt? */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002422 li r12, BOOK3S_INTERRUPT_EXTERNAL
2423 beq kvmppc_read_intr /* if so, see what it was */
2424 li r3, 0
2425 li r12, 0
2426 cmpwi r6, 6 /* was it the decrementer? */
2427 beq 0f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002428BEGIN_FTR_SECTION
2429 cmpwi r6, 5 /* privileged doorbell? */
2430 beq 0f
Paul Mackerras5d00f662014-01-08 21:25:28 +11002431 cmpwi r6, 3 /* hypervisor doorbell? */
2432 beq 3f
Paul Mackerrasaa31e842014-01-08 21:25:26 +11002433END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002434 li r3, 1 /* anything else, return 1 */
24350: blr
2436
Paul Mackerras5d00f662014-01-08 21:25:28 +11002437 /* hypervisor doorbell */
24383: li r12, BOOK3S_INTERRUPT_H_DOORBELL
Paul Mackerras66feed62015-03-28 14:21:12 +11002439 /* see if it's a host IPI */
Paul Mackerras5d00f662014-01-08 21:25:28 +11002440 li r3, 1
Paul Mackerras66feed62015-03-28 14:21:12 +11002441 lbz r0, HSTATE_HOST_IPI(r13)
2442 cmpwi r0, 0
2443 bnelr
2444 /* if not, clear it and return -1 */
2445 lis r6, (PPC_DBELL_SERVER << (63-36))@h
2446 PPC_MSGCLR(6)
2447 li r3, -1
Paul Mackerras5d00f662014-01-08 21:25:28 +11002448 blr
2449
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002450/*
Paul Mackerrasc9342432013-09-06 13:24:13 +10002451 * Determine what sort of external interrupt is pending (if any).
2452 * Returns:
2453 * 0 if no interrupt is pending
2454 * 1 if an interrupt is pending that needs to be handled by the host
2455 * -1 if there was a guest wakeup IPI (which has now been cleared)
Paul Mackerras1f09c3e2015-03-28 14:21:04 +11002456 * Modifies r0, r6, r7, r8, returns value in r3.
Paul Mackerrasc9342432013-09-06 13:24:13 +10002457 */
2458kvmppc_read_intr:
2459 /* see if a host IPI is pending */
2460 li r3, 1
2461 lbz r0, HSTATE_HOST_IPI(r13)
2462 cmpwi r0, 0
2463 bne 1f
Paul Mackerrasde56a942011-06-29 00:21:34 +00002464
Paul Mackerrasc9342432013-09-06 13:24:13 +10002465 /* Now read the interrupt from the ICP */
2466 ld r6, HSTATE_XICS_PHYS(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002467 li r7, XICS_XIRR
Paul Mackerrasc9342432013-09-06 13:24:13 +10002468 cmpdi r6, 0
2469 beq- 1f
2470 lwzcix r0, r6, r7
Alexander Graf76d072f2014-06-11 10:37:52 +02002471 /*
 2472	 * Save XIRR for later. Since we get it in reverse endian on LE
2473 * systems, save it byte reversed and fetch it back in host endian.
2474 */
2475 li r3, HSTATE_SAVED_XIRR
2476 STWX_BE r0, r3, r13
2477#ifdef __LITTLE_ENDIAN__
2478 lwz r3, HSTATE_SAVED_XIRR(r13)
2479#else
2480 mr r3, r0
2481#endif
2482 rlwinm. r3, r3, 0, 0xffffff
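	/* Keep only the low 24 bits (XISR, the interrupt source);
	 * zero means nothing is pending in the ICP. */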
Paul Mackerrasde56a942011-06-29 00:21:34 +00002483 sync
Paul Mackerrasc9342432013-09-06 13:24:13 +10002484 beq 1f /* if nothing pending in the ICP */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002485
Paul Mackerrasc9342432013-09-06 13:24:13 +10002486 /* We found something in the ICP...
2487 *
2488 * If it's not an IPI, stash it in the PACA and return to
 2489	 * the host; we don't (yet) handle directing real external
2490 * interrupts directly to the guest
2491 */
2492 cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
Paul Mackerrasc9342432013-09-06 13:24:13 +10002493 bne 42f
Paul Mackerrasde56a942011-06-29 00:21:34 +00002494
Paul Mackerrasc9342432013-09-06 13:24:13 +10002495 /* It's an IPI, clear the MFRR and EOI it */
2496 li r3, 0xff
2497 li r8, XICS_MFRR
2498 stbcix r3, r6, r8 /* clear the IPI */
2499 stwcix r0, r6, r7 /* EOI it */
2500 sync
Paul Mackerrasde56a942011-06-29 00:21:34 +00002501
Paul Mackerrasc9342432013-09-06 13:24:13 +10002502 /* We need to re-check host IPI now in case it got set in the
2503 * meantime. If it's clear, we bounce the interrupt to the
2504 * guest
2505 */
2506 lbz r0, HSTATE_HOST_IPI(r13)
2507 cmpwi r0, 0
2508 bne- 43f
2509
2510 /* OK, it's an IPI for us */
Paul Mackerras6af27c82015-03-28 14:21:10 +11002511 li r12, 0
Paul Mackerrasc9342432013-09-06 13:24:13 +10002512 li r3, -1
25131: blr
2514
Alexander Graf76d072f2014-06-11 10:37:52 +0200251542: /* It's not an IPI and it's for the host. We saved a copy of XIRR in
2516 * the PACA earlier, it will be picked up by the host ICP driver
Paul Mackerrasc9342432013-09-06 13:24:13 +10002517 */
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002518 li r3, 1
Paul Mackerrasc9342432013-09-06 13:24:13 +10002519 b 1b
2520
252143: /* We raced with the host, we need to resend that IPI, bummer */
2522 li r0, IPI_PRIORITY
2523 stbcix r0, r6, r8 /* set the IPI */
2524 sync
Paul Mackerrase3bbbbf2014-01-08 21:25:25 +11002525 li r3, 1
Paul Mackerrasc9342432013-09-06 13:24:13 +10002526 b 1b
Paul Mackerrasde56a942011-06-29 00:21:34 +00002527
2528/*
2529 * Save away FP, VMX and VSX registers.
2530 * r3 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002531 * N.B. r30 and r31 are volatile across this function,
2532 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002533 */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002534kvmppc_save_fp:
2535 mflr r30
2536 mr r31,r3
Paul Mackerrasde56a942011-06-29 00:21:34 +00002537 mfmsr r5
2538 ori r8,r5,MSR_FP
2539#ifdef CONFIG_ALTIVEC
2540BEGIN_FTR_SECTION
2541 oris r8,r8,MSR_VEC@h
2542END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2543#endif
2544#ifdef CONFIG_VSX
2545BEGIN_FTR_SECTION
2546 oris r8,r8,MSR_VSX@h
2547END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2548#endif
2549 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11002550 addi r3,r3,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002551 bl store_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002552#ifdef CONFIG_ALTIVEC
2553BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002554 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002555 bl store_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002556END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2557#endif
2558 mfspr r6,SPRN_VRSAVE
Paul Mackerrase724f082014-03-13 20:02:48 +11002559 stw r6,VCPU_VRSAVE(r31)
Paul Mackerras595e4f72013-10-15 20:43:04 +11002560 mtlr r30
Paul Mackerrasde56a942011-06-29 00:21:34 +00002561 blr
2562
2563/*
2564 * Load up FP, VMX and VSX registers
2565 * r4 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002566 * N.B. r30 and r31 are volatile across this function,
2567 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002568 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002569kvmppc_load_fp:
Paul Mackerras595e4f72013-10-15 20:43:04 +11002570 mflr r30
2571 mr r31,r4
Paul Mackerrasde56a942011-06-29 00:21:34 +00002572 mfmsr r9
2573 ori r8,r9,MSR_FP
2574#ifdef CONFIG_ALTIVEC
2575BEGIN_FTR_SECTION
2576 oris r8,r8,MSR_VEC@h
2577END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2578#endif
2579#ifdef CONFIG_VSX
2580BEGIN_FTR_SECTION
2581 oris r8,r8,MSR_VSX@h
2582END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2583#endif
2584 mtmsrd r8
Paul Mackerras595e4f72013-10-15 20:43:04 +11002585 addi r3,r4,VCPU_FPRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002586 bl load_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002587#ifdef CONFIG_ALTIVEC
2588BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002589 addi r3,r31,VCPU_VRS
Alexander Graf9bf163f2014-06-16 14:41:15 +02002590 bl load_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002591END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2592#endif
Paul Mackerrase724f082014-03-13 20:02:48 +11002593 lwz r7,VCPU_VRSAVE(r31)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002594 mtspr SPRN_VRSAVE,r7
Paul Mackerras595e4f72013-10-15 20:43:04 +11002595 mtlr r30
2596 mr r4,r31
Paul Mackerrasde56a942011-06-29 00:21:34 +00002597 blr
Paul Mackerras44a3add2013-10-04 21:45:04 +10002598
2599/*
2600 * We come here if we get any exception or interrupt while we are
2601 * executing host real mode code while in guest MMU context.
2602 * For now just spin, but we should do something better.
2603 */
2604kvmppc_bad_host_intr:
2605 b .
Michael Neulinge4e38122014-03-25 10:47:02 +11002606
2607/*
2608 * This mimics the MSR transition on IRQ delivery. The new guest MSR is taken
2609 * from VCPU_INTR_MSR and is modified based on the required TM state changes.
2610 * r11 has the guest MSR value (in/out)
2611 * r9 has a vcpu pointer (in)
2612 * r0 is used as a scratch register
2613 */
2614kvmppc_msr_interrupt:
2615 rldicl r0, r11, 64 - MSR_TS_S_LG, 62
2616 cmpwi r0, 2 /* Check if we are in transactional state.. */
2617 ld r11, VCPU_INTR_MSR(r9)
2618 bne 1f
2619 /* ... if transactional, change to suspended */
2620 li r0, 1
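	/* Insert the TS value from r0 into the new MSR: 1 (suspended) if the
	 * guest was transactional, otherwise the guest's original TS field. */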
26211: rldimi r11, r0, MSR_TS_S_LG, 63 - MSR_TS_T_LG
2622 blr
Paul Mackerras9bc01a92014-05-26 19:48:40 +10002623
2624/*
2625 * This works around a hardware bug on POWER8E processors, where
2626 * writing a 1 to the MMCR0[PMAO] bit doesn't generate a
2627 * performance monitor interrupt. Instead, when we need to have
2628 * an interrupt pending, we have to arrange for a counter to overflow.
2629 */
2630kvmppc_fix_pmao:
2631 li r3, 0
2632 mtspr SPRN_MMCR2, r3
2633 lis r3, (MMCR0_PMXE | MMCR0_FCECE)@h
2634 ori r3, r3, MMCR0_PMCjCE | MMCR0_C56RUN
2635 mtspr SPRN_MMCR0, r3
2636 lis r3, 0x7fff
2637 ori r3, r3, 0xffff
2638 mtspr SPRN_PMC6, r3
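	/* PMC6 is now one count away from going negative, so the next event
	 * raises the performance monitor exception enabled above. */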
2639 isync
2640 blr
Paul Mackerrasb6c295d2015-03-28 14:21:02 +11002641
2642#ifdef CONFIG_KVM_BOOK3S_HV_EXIT_TIMING
2643/*
2644 * Start timing an activity
2645 * r3 = pointer to time accumulation struct, r4 = vcpu
2646 */
2647kvmhv_start_timing:
2648 ld r5, HSTATE_KVM_VCORE(r13)
2649 lbz r6, VCORE_IN_GUEST(r5)
2650 cmpwi r6, 0
2651 beq 5f /* if in guest, need to */
2652 ld r6, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
26535: mftb r5
2654 subf r5, r6, r5
2655 std r3, VCPU_CUR_ACTIVITY(r4)
2656 std r5, VCPU_ACTIVITY_START(r4)
2657 blr
2658
2659/*
2660 * Accumulate time to one activity and start another.
2661 * r3 = pointer to new time accumulation struct, r4 = vcpu
2662 */
2663kvmhv_accumulate_time:
2664 ld r5, HSTATE_KVM_VCORE(r13)
2665 lbz r8, VCORE_IN_GUEST(r5)
2666 cmpwi r8, 0
2667 beq 4f /* if in guest, need to */
2668 ld r8, VCORE_TB_OFFSET(r5) /* subtract timebase offset */
26694: ld r5, VCPU_CUR_ACTIVITY(r4)
2670 ld r6, VCPU_ACTIVITY_START(r4)
2671 std r3, VCPU_CUR_ACTIVITY(r4)
2672 mftb r7
2673 subf r7, r8, r7
2674 std r7, VCPU_ACTIVITY_START(r4)
2675 cmpdi r5, 0
2676 beqlr
2677 subf r3, r6, r7
2678 ld r8, TAS_SEQCOUNT(r5)
2679 cmpdi r8, 0
2680 addi r8, r8, 1
2681 std r8, TAS_SEQCOUNT(r5)
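	/* seqcount is now odd (update in progress, readers retry); cr0 from
	 * the cmpdi above still records whether this is the first sample. */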
2682 lwsync
2683 ld r7, TAS_TOTAL(r5)
2684 add r7, r7, r3
2685 std r7, TAS_TOTAL(r5)
2686 ld r6, TAS_MIN(r5)
2687 ld r7, TAS_MAX(r5)
2688 beq 3f
2689 cmpd r3, r6
2690 bge 1f
26913: std r3, TAS_MIN(r5)
26921: cmpd r3, r7
2693 ble 2f
2694 std r3, TAS_MAX(r5)
26952: lwsync
2696 addi r8, r8, 1
2697 std r8, TAS_SEQCOUNT(r5)
2698 blr
2699#endif