Paul Mackerrasde56a942011-06-29 00:21:34 +00001/*
2 * This program is free software; you can redistribute it and/or modify
3 * it under the terms of the GNU General Public License, version 2, as
4 * published by the Free Software Foundation.
5 *
6 * This program is distributed in the hope that it will be useful,
7 * but WITHOUT ANY WARRANTY; without even the implied warranty of
8 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
9 * GNU General Public License for more details.
10 *
11 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
12 *
13 * Derived from book3s_rmhandlers.S and other files, which are:
14 *
15 * Copyright SUSE Linux Products GmbH 2009
16 *
17 * Authors: Alexander Graf <agraf@suse.de>
18 */
19
20#include <asm/ppc_asm.h>
21#include <asm/kvm_asm.h>
22#include <asm/reg.h>
Paul Mackerras177339d2011-07-23 17:41:11 +100023#include <asm/mmu.h>
Paul Mackerrasde56a942011-06-29 00:21:34 +000024#include <asm/page.h>
Paul Mackerras177339d2011-07-23 17:41:11 +100025#include <asm/ptrace.h>
26#include <asm/hvcall.h>
Paul Mackerrasde56a942011-06-29 00:21:34 +000027#include <asm/asm-offsets.h>
28#include <asm/exception-64s.h>
Paul Mackerrasf0888f72012-02-03 00:54:17 +000029#include <asm/kvm_book3s_asm.h>
Paul Mackerrasb4072df2012-11-23 22:37:50 +000030#include <asm/mmu-hash64.h>
Paul Mackerrasde56a942011-06-29 00:21:34 +000031
Anton Blanchard7ffcf8e2013-08-07 02:01:46 +100032#ifdef __LITTLE_ENDIAN__
33#error Need to fix lppaca and SLB shadow accesses in little endian mode
34#endif
35
Paul Mackerrase0b7ec02014-01-08 21:25:20 +110036/* Values in HSTATE_NAPPING(r13) */
37#define NAPPING_CEDE 1
38#define NAPPING_NOVCPU 2
39
Paul Mackerrasde56a942011-06-29 00:21:34 +000040/*
Paul Mackerras19ccb762011-07-23 17:42:46 +100041 * Call kvmppc_hv_entry in real mode.
Paul Mackerrasde56a942011-06-29 00:21:34 +000042 * Must be called with interrupts hard-disabled.
43 *
44 * Input Registers:
45 *
46 * LR = return address to continue at after eventually re-enabling MMU
47 */
48_GLOBAL(kvmppc_hv_entry_trampoline)
Paul Mackerras218309b2013-09-06 13:23:44 +100049 mflr r0
50 std r0, PPC_LR_STKOFF(r1)
51 stdu r1, -112(r1)
Paul Mackerrasde56a942011-06-29 00:21:34 +000052 mfmsr r10
Paul Mackerras218309b2013-09-06 13:23:44 +100053 LOAD_REG_ADDR(r5, kvmppc_call_hv_entry)
Paul Mackerrasde56a942011-06-29 00:21:34 +000054 li r0,MSR_RI
55 andc r0,r10,r0
56 li r6,MSR_IR | MSR_DR
57 andc r6,r10,r6
58 mtmsrd r0,1 /* clear RI in MSR */
59 mtsrr0 r5
60 mtsrr1 r6
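 /*
  * SRR0/SRR1 now hold kvmppc_call_hv_entry and a copy of the MSR with
  * IR/DR clear, so the RFI below drops address translation and
  * continues there in real mode.  RI is cleared first because
  * SRR0/SRR1 are clobbered here, so the state is not recoverable.
  */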
61 RFI
62
Paul Mackerras218309b2013-09-06 13:23:44 +100063kvmppc_call_hv_entry:
Paul Mackerrase0b7ec02014-01-08 21:25:20 +110064 ld r4, HSTATE_KVM_VCPU(r13)
Paul Mackerras218309b2013-09-06 13:23:44 +100065 bl kvmppc_hv_entry
66
67 /* Back from guest - restore host state and return to caller */
68
Michael Neulingeee7ff92014-01-08 21:25:19 +110069BEGIN_FTR_SECTION
Paul Mackerras218309b2013-09-06 13:23:44 +100070 /* Restore host DABR and DABRX */
71 ld r5,HSTATE_DABR(r13)
72 li r6,7
73 mtspr SPRN_DABR,r5
74 mtspr SPRN_DABRX,r6
Michael Neulingeee7ff92014-01-08 21:25:19 +110075END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Paul Mackerras218309b2013-09-06 13:23:44 +100076
77 /* Restore SPRG3 */
78 ld r3,PACA_SPRG3(r13)
79 mtspr SPRN_SPRG3,r3
80
Paul Mackerras218309b2013-09-06 13:23:44 +100081 /* Reload the host's PMU registers */
82 ld r3, PACALPPACAPTR(r13) /* is the host using the PMU? */
83 lbz r4, LPPACA_PMCINUSE(r3)
84 cmpwi r4, 0
85 beq 23f /* skip if not */
86 lwz r3, HSTATE_PMC(r13)
87 lwz r4, HSTATE_PMC + 4(r13)
88 lwz r5, HSTATE_PMC + 8(r13)
89 lwz r6, HSTATE_PMC + 12(r13)
90 lwz r8, HSTATE_PMC + 16(r13)
91 lwz r9, HSTATE_PMC + 20(r13)
92BEGIN_FTR_SECTION
93 lwz r10, HSTATE_PMC + 24(r13)
94 lwz r11, HSTATE_PMC + 28(r13)
95END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
96 mtspr SPRN_PMC1, r3
97 mtspr SPRN_PMC2, r4
98 mtspr SPRN_PMC3, r5
99 mtspr SPRN_PMC4, r6
100 mtspr SPRN_PMC5, r8
101 mtspr SPRN_PMC6, r9
102BEGIN_FTR_SECTION
103 mtspr SPRN_PMC7, r10
104 mtspr SPRN_PMC8, r11
105END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
106 ld r3, HSTATE_MMCR(r13)
107 ld r4, HSTATE_MMCR + 8(r13)
108 ld r5, HSTATE_MMCR + 16(r13)
109 mtspr SPRN_MMCR1, r4
110 mtspr SPRN_MMCRA, r5
111 mtspr SPRN_MMCR0, r3
112 isync
11323:
114
115 /*
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100116 * Reload DEC. HDEC interrupts were disabled when
117 * we reloaded the host's LPCR value.
118 */
119 ld r3, HSTATE_DECEXP(r13)
120 mftb r4
121 subf r4, r4, r3
122 mtspr SPRN_DEC, r4
123
124 /*
Paul Mackerras218309b2013-09-06 13:23:44 +1000125 * For external and machine check interrupts, we need
126 * to call the Linux handler to process the interrupt.
127 * We do that by jumping to absolute address 0x500 for
128 * external interrupts, or the machine_check_fwnmi label
129 * for machine checks (since firmware might have patched
130 * the vector area at 0x200). The [h]rfid at the end of the
131 * handler will return to the book3s_hv_interrupts.S code.
132 * For other interrupts we do the rfid to get back
133 * to the book3s_hv_interrupts.S code here.
134 */
135 ld r8, 112+PPC_LR_STKOFF(r1)
136 addi r1, r1, 112
137 ld r7, HSTATE_HOST_MSR(r13)
138
139 cmpwi cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
140 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
141BEGIN_FTR_SECTION
142 beq 11f
143END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
144
145 /* RFI into the highmem handler, or branch to interrupt handler */
146 mfmsr r6
147 li r0, MSR_RI
148 andc r6, r6, r0
149 mtmsrd r6, 1 /* Clear RI in MSR */
150 mtsrr0 r8
151 mtsrr1 r7
152 beqa 0x500 /* external interrupt (PPC970) */
153 beq cr1, 13f /* machine check */
154 RFI
155
156 /* On POWER7, we have external interrupts set to use HSRR0/1 */
15711: mtspr SPRN_HSRR0, r8
158 mtspr SPRN_HSRR1, r7
159 ba 0x500
160
16113: b machine_check_fwnmi
162
Paul Mackerrasde56a942011-06-29 00:21:34 +0000163
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100164kvmppc_primary_no_guest:
165 /* We handle this much like a ceded vcpu */
166 /* set our bit in napping_threads */
167 ld r5, HSTATE_KVM_VCORE(r13)
168 lbz r7, HSTATE_PTID(r13)
169 li r0, 1
170 sld r0, r0, r7
171 addi r6, r5, VCORE_NAPPING_THREADS
1721: lwarx r3, 0, r6
173 or r3, r3, r0
174 stwcx. r3, 0, r6
175 bne 1b
176 /* order napping_threads update vs testing entry_exit_count */
177 isync
178 li r12, 0
179 lwz r7, VCORE_ENTRY_EXIT(r5)
180 cmpwi r7, 0x100
181 bge kvm_novcpu_exit /* another thread already exiting */
182 li r3, NAPPING_NOVCPU
183 stb r3, HSTATE_NAPPING(r13)
184 li r3, 1
185 stb r3, HSTATE_HWTHREAD_REQ(r13)
186
187 b kvm_do_nap
188
189kvm_novcpu_wakeup:
190 ld r1, HSTATE_HOST_R1(r13)
191 ld r5, HSTATE_KVM_VCORE(r13)
192 li r0, 0
193 stb r0, HSTATE_NAPPING(r13)
194 stb r0, HSTATE_HWTHREAD_REQ(r13)
195
196 /* see if any other thread is already exiting */
197 li r12, 0
198 lwz r0, VCORE_ENTRY_EXIT(r5)
199 cmpwi r0, 0x100
200 bge kvm_novcpu_exit
201
202 /* clear our bit in napping_threads */
203 lbz r7, HSTATE_PTID(r13)
204 li r0, 1
205 sld r0, r0, r7
206 addi r6, r5, VCORE_NAPPING_THREADS
2074: lwarx r3, 0, r6
208 andc r3, r3, r0
209 stwcx. r3, 0, r6
210 bne 4b
211
212 /* Check the wake reason in SRR1 to see why we got here */
213 mfspr r3, SPRN_SRR1
214 rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */
215 cmpwi r3, 4 /* was it an external interrupt? */
216 bne kvm_novcpu_exit /* if not, exit the guest */
217
 218 /* external interrupt - read and handle it */
219 li r12, BOOK3S_INTERRUPT_EXTERNAL
220 bl kvmppc_read_intr
221 cmpdi r3, 0
222 bge kvm_novcpu_exit
223 li r12, 0
224
225 /* Got an IPI but other vcpus aren't yet exiting, must be a latecomer */
226 ld r4, HSTATE_KVM_VCPU(r13)
227 cmpdi r4, 0
228 bne kvmppc_got_guest
229
230kvm_novcpu_exit:
231 b hdec_soon
232
Paul Mackerras371fefd2011-06-29 00:23:08 +0000233/*
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100234 * We come in here when wakened from nap mode.
Paul Mackerras371fefd2011-06-29 00:23:08 +0000235 * Relocation is off and most register values are lost.
236 * r13 points to the PACA.
237 */
238 .globl kvm_start_guest
239kvm_start_guest:
Paul Mackerras19ccb762011-07-23 17:42:46 +1000240 ld r2,PACATOC(r13)
241
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000242 li r0,KVM_HWTHREAD_IN_KVM
243 stb r0,HSTATE_HWTHREAD_STATE(r13)
244
245 /* NV GPR values from power7_idle() will no longer be valid */
246 li r0,1
247 stb r0,PACA_NAPSTATELOST(r13)
248
Paul Mackerras4619ac82013-04-17 20:31:41 +0000249 /* were we napping due to cede? */
250 lbz r0,HSTATE_NAPPING(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100251 cmpwi r0,NAPPING_CEDE
252 beq kvm_end_cede
253 cmpwi r0,NAPPING_NOVCPU
254 beq kvm_novcpu_wakeup
255
256 ld r1,PACAEMERGSP(r13)
257 subi r1,r1,STACK_FRAME_OVERHEAD
Paul Mackerras4619ac82013-04-17 20:31:41 +0000258
259 /*
260 * We weren't napping due to cede, so this must be a secondary
261 * thread being woken up to run a guest, or being woken up due
262 * to a stray IPI. (Or due to some machine check or hypervisor
263 * maintenance interrupt while the core is in KVM.)
264 */
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000265
266 /* Check the wake reason in SRR1 to see why we got here */
267 mfspr r3,SPRN_SRR1
268 rlwinm r3,r3,44-31,0x7 /* extract wake reason field */
269 cmpwi r3,4 /* was it an external interrupt? */
Paul Mackerras4619ac82013-04-17 20:31:41 +0000270 bne 27f /* if not */
271 ld r5,HSTATE_XICS_PHYS(r13)
272 li r7,XICS_XIRR /* if it was an external interrupt, */
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000273 lwzcix r8,r5,r7 /* get and ack the interrupt */
274 sync
275 clrldi. r9,r8,40 /* get interrupt source ID. */
Paul Mackerras4619ac82013-04-17 20:31:41 +0000276 beq 28f /* none there? */
277 cmpwi r9,XICS_IPI /* was it an IPI? */
278 bne 29f
279 li r0,0xff
280 li r6,XICS_MFRR
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000281 stbcix r0,r5,r6 /* clear IPI */
Paul Mackerras4619ac82013-04-17 20:31:41 +0000282 stwcix r8,r5,r7 /* EOI the interrupt */
283 sync /* order loading of vcpu after that */
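 /*
  * Reading XIRR both returns and accepts the highest-priority pending
  * interrupt; the source number is in the low 24 bits.  For an IPI we
  * reset MFRR to 0xff (lowest priority) and then store the XIRR value
  * back, which is how the ICP is told to EOI the interrupt.
  */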
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000284
Paul Mackerras4619ac82013-04-17 20:31:41 +0000285 /* get vcpu pointer, NULL if we have no vcpu to run */
Paul Mackerras7b444c62012-10-15 01:16:14 +0000286 ld r4,HSTATE_KVM_VCPU(r13)
287 cmpdi r4,0
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000288 /* if we have no vcpu to run, go back to sleep */
Paul Mackerras7b444c62012-10-15 01:16:14 +0000289 beq kvm_no_guest
Paul Mackerras218309b2013-09-06 13:23:44 +1000290 b 30f
Paul Mackerrasf0888f72012-02-03 00:54:17 +0000291
Paul Mackerras4619ac82013-04-17 20:31:41 +000029227: /* XXX should handle hypervisor maintenance interrupts etc. here */
293 b kvm_no_guest
29428: /* SRR1 said external but ICP said nope?? */
295 b kvm_no_guest
29629: /* External non-IPI interrupt to offline secondary thread? help?? */
297 stw r8,HSTATE_SAVED_XIRR(r13)
298 b kvm_no_guest
Paul Mackerras371fefd2011-06-29 00:23:08 +0000299
Paul Mackerrase0b7ec02014-01-08 21:25:20 +110030030:
301 /* Set HSTATE_DSCR(r13) to something sensible */
302 LOAD_REG_ADDR(r6, dscr_default)
303 ld r6, 0(r6)
304 std r6, HSTATE_DSCR(r13)
305
306 bl kvmppc_hv_entry
Paul Mackerras218309b2013-09-06 13:23:44 +1000307
308 /* Back from the guest, go back to nap */
309 /* Clear our vcpu pointer so we don't come back in early */
310 li r0, 0
311 std r0, HSTATE_KVM_VCPU(r13)
312 lwsync
313 /* Clear any pending IPI - we're an offline thread */
314 ld r5, HSTATE_XICS_PHYS(r13)
315 li r7, XICS_XIRR
316 lwzcix r3, r5, r7 /* ack any pending interrupt */
317 rlwinm. r0, r3, 0, 0xffffff /* any pending? */
318 beq 37f
319 sync
320 li r0, 0xff
321 li r6, XICS_MFRR
322 stbcix r0, r5, r6 /* clear the IPI */
323 stwcix r3, r5, r7 /* EOI it */
32437: sync
325
326 /* increment the nap count and then go to nap mode */
327 ld r4, HSTATE_KVM_VCORE(r13)
328 addi r4, r4, VCORE_NAP_COUNT
329 lwsync /* make previous updates visible */
33051: lwarx r3, 0, r4
331 addi r3, r3, 1
332 stwcx. r3, 0, r4
333 bne 51b
334
335kvm_no_guest:
336 li r0, KVM_HWTHREAD_IN_NAP
337 stb r0, HSTATE_HWTHREAD_STATE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100338kvm_do_nap:
Paul Mackerras218309b2013-09-06 13:23:44 +1000339 li r3, LPCR_PECE0
340 mfspr r4, SPRN_LPCR
341 rlwimi r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
342 mtspr SPRN_LPCR, r4
343 isync
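 /*
  * The store/ptesync/load/compare sequence below is the architected
  * way to enter nap: it ensures all prior stores have been performed
  * before the thread sleeps (the branch is never taken, as r0 == r0).
  */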
344 std r0, HSTATE_SCRATCH0(r13)
345 ptesync
346 ld r0, HSTATE_SCRATCH0(r13)
3471: cmpd r0, r0
348 bne 1b
349 nap
350 b .
351
352/******************************************************************************
353 * *
354 * Entry code *
355 * *
356 *****************************************************************************/
357
Paul Mackerrasde56a942011-06-29 00:21:34 +0000358.global kvmppc_hv_entry
359kvmppc_hv_entry:
360
361 /* Required state:
362 *
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100363 * R4 = vcpu pointer (or NULL)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000364 * MSR = ~IR|DR
365 * R13 = PACA
366 * R1 = host R1
367 * all other volatile GPRS = free
368 */
369 mflr r0
Paul Mackerras218309b2013-09-06 13:23:44 +1000370 std r0, PPC_LR_STKOFF(r1)
371 stdu r1, -112(r1)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000372
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100373 /* Save R1 in the PACA */
374 std r1, HSTATE_HOST_R1(r13)
375
376 li r6, KVM_GUEST_MODE_HOST_HV
377 stb r6, HSTATE_IN_GUEST(r13)
378
379 /* Clear out SLB */
380 li r6,0
381 slbmte r6,r6
382 slbia
383 ptesync
384
385BEGIN_FTR_SECTION
386 b 30f
387END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
388 /*
389 * POWER7 host -> guest partition switch code.
390 * We don't have to lock against concurrent tlbies,
391 * but we do have to coordinate across hardware threads.
392 */
393 /* Increment entry count iff exit count is zero. */
394 ld r5,HSTATE_KVM_VCORE(r13)
395 addi r9,r5,VCORE_ENTRY_EXIT
39621: lwarx r3,0,r9
397 cmpwi r3,0x100 /* any threads starting to exit? */
398 bge secondary_too_late /* if so we're too late to the party */
399 addi r3,r3,1
400 stwcx. r3,0,r9
401 bne 21b
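 /*
  * vcore->entry_exit_count packs the number of threads that have
  * entered the guest in the low byte and the number exiting in bits
  * 8-15, so a value >= 0x100 means some thread has begun exiting.
  */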
402
403 /* Primary thread switches to guest partition. */
404 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
405 lbz r6,HSTATE_PTID(r13)
406 cmpwi r6,0
407 bne 20f
408 ld r6,KVM_SDR1(r9)
409 lwz r7,KVM_LPID(r9)
410 li r0,LPID_RSVD /* switch to reserved LPID */
411 mtspr SPRN_LPID,r0
412 ptesync
413 mtspr SPRN_SDR1,r6 /* switch to partition page table */
414 mtspr SPRN_LPID,r7
415 isync
416
417 /* See if we need to flush the TLB */
418 lhz r6,PACAPACAINDEX(r13) /* test_bit(cpu, need_tlb_flush) */
419 clrldi r7,r6,64-6 /* extract bit number (6 bits) */
420 srdi r6,r6,6 /* doubleword number */
421 sldi r6,r6,3 /* address offset */
422 add r6,r6,r9
423 addi r6,r6,KVM_NEED_FLUSH /* dword in kvm->arch.need_tlb_flush */
424 li r0,1
425 sld r0,r0,r7
426 ld r7,0(r6)
427 and. r7,r7,r0
428 beq 22f
42923: ldarx r7,0,r6 /* if set, clear the bit */
430 andc r7,r7,r0
431 stdcx. r7,0,r6
432 bne 23b
Paul Mackerrasca252052014-01-08 21:25:22 +1100433 /* Flush the TLB of any entries for this LPID */
434 /* use arch 2.07S as a proxy for POWER8 */
435BEGIN_FTR_SECTION
436 li r6,512 /* POWER8 has 512 sets */
437FTR_SECTION_ELSE
438 li r6,128 /* POWER7 has 128 sets */
439ALT_FTR_SECTION_END_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100440 mtctr r6
441 li r7,0x800 /* IS field = 0b10 */
442 ptesync
44328: tlbiel r7
444 addi r7,r7,0x1000
445 bdnz 28b
446 ptesync
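 /*
  * Each tlbiel above invalidates one TLB set for the LPID we just
  * switched to; stepping r7 by 0x1000 advances the set index, so the
  * loop walks all 128 (POWER7) or 512 (POWER8) sets.
  */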
447
448 /* Add timebase offset onto timebase */
44922: ld r8,VCORE_TB_OFFSET(r5)
450 cmpdi r8,0
451 beq 37f
452 mftb r6 /* current host timebase */
453 add r8,r8,r6
454 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
455 mftb r7 /* check if lower 24 bits overflowed */
456 clrldi r6,r6,40
457 clrldi r7,r7,40
458 cmpld r7,r6
459 bge 37f
460 addis r8,r8,0x100 /* if so, increment upper 40 bits */
461 mtspr SPRN_TBU40,r8
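 /*
  * mtspr TBU40 changes only the upper 40 bits of the timebase, which
  * is why the guest timebase offset is kept as a multiple of 2^24.
  * If the low 24 bits wrapped between the mftb and the mtspr, the
  * check above bumps the upper bits so the timebase never goes back.
  */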
462
463 /* Load guest PCR value to select appropriate compat mode */
46437: ld r7, VCORE_PCR(r5)
465 cmpdi r7, 0
466 beq 38f
467 mtspr SPRN_PCR, r7
46838:
Michael Neulingb005255e2014-01-08 21:25:21 +1100469
470BEGIN_FTR_SECTION
471 /* DPDES is shared between threads */
472 ld r8, VCORE_DPDES(r5)
473 mtspr SPRN_DPDES, r8
474END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
475
Paul Mackerrase0b7ec02014-01-08 21:25:20 +1100476 li r0,1
477 stb r0,VCORE_IN_GUEST(r5) /* signal secondaries to continue */
478 b 10f
479
480 /* Secondary threads wait for primary to have done partition switch */
48120: lbz r0,VCORE_IN_GUEST(r5)
482 cmpwi r0,0
483 beq 20b
484
485 /* Set LPCR and RMOR. */
48610: ld r8,VCORE_LPCR(r5)
487 mtspr SPRN_LPCR,r8
488 ld r8,KVM_RMOR(r9)
489 mtspr SPRN_RMOR,r8
490 isync
491
492 /* Check if HDEC expires soon */
493 mfspr r3,SPRN_HDEC
494 cmpwi r3,512 /* 1 microsecond */
495 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
496 blt hdec_soon
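 /*
  * HDEC counts down at the timebase frequency (512MHz on these
  * processors), so 512 ticks is roughly 1 microsecond.  If it is that
  * close to firing, entering the guest would bounce straight back out,
  * so we take the exit path (hdec_soon) immediately instead.
  */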
497 b 31f
498
499 /*
500 * PPC970 host -> guest partition switch code.
501 * We have to lock against concurrent tlbies,
502 * using native_tlbie_lock to lock against host tlbies
503 * and kvm->arch.tlbie_lock to lock against guest tlbies.
504 * We also have to invalidate the TLB since its
505 * entries aren't tagged with the LPID.
506 */
50730: ld r5,HSTATE_KVM_VCORE(r13)
508 ld r9,VCORE_KVM(r5) /* pointer to struct kvm */
509
510 /* first take native_tlbie_lock */
511 .section ".toc","aw"
512toc_tlbie_lock:
513 .tc native_tlbie_lock[TC],native_tlbie_lock
514 .previous
515 ld r3,toc_tlbie_lock@toc(2)
516#ifdef __BIG_ENDIAN__
517 lwz r8,PACA_LOCK_TOKEN(r13)
518#else
519 lwz r8,PACAPACAINDEX(r13)
520#endif
52124: lwarx r0,0,r3
522 cmpwi r0,0
523 bne 24b
524 stwcx. r8,0,r3
525 bne 24b
526 isync
527
528 ld r5,HSTATE_KVM_VCORE(r13)
529 ld r7,VCORE_LPCR(r5) /* use vcore->lpcr to store HID4 */
530 li r0,0x18f
531 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
532 or r0,r7,r0
533 ptesync
534 sync
535 mtspr SPRN_HID4,r0 /* switch to reserved LPID */
536 isync
537 li r0,0
538 stw r0,0(r3) /* drop native_tlbie_lock */
539
540 /* invalidate the whole TLB */
541 li r0,256
542 mtctr r0
543 li r6,0
54425: tlbiel r6
545 addi r6,r6,0x1000
546 bdnz 25b
547 ptesync
548
549 /* Take the guest's tlbie_lock */
550 addi r3,r9,KVM_TLBIE_LOCK
55124: lwarx r0,0,r3
552 cmpwi r0,0
553 bne 24b
554 stwcx. r8,0,r3
555 bne 24b
556 isync
557 ld r6,KVM_SDR1(r9)
558 mtspr SPRN_SDR1,r6 /* switch to partition page table */
559
560 /* Set up HID4 with the guest's LPID etc. */
561 sync
562 mtspr SPRN_HID4,r7
563 isync
564
565 /* drop the guest's tlbie_lock */
566 li r0,0
567 stw r0,0(r3)
568
569 /* Check if HDEC expires soon */
570 mfspr r3,SPRN_HDEC
571 cmpwi r3,10
572 li r12,BOOK3S_INTERRUPT_HV_DECREMENTER
573 blt hdec_soon
574
575 /* Enable HDEC interrupts */
576 mfspr r0,SPRN_HID0
577 li r3,1
578 rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
579 sync
580 mtspr SPRN_HID0,r0
581 mfspr r0,SPRN_HID0
582 mfspr r0,SPRN_HID0
583 mfspr r0,SPRN_HID0
584 mfspr r0,SPRN_HID0
585 mfspr r0,SPRN_HID0
586 mfspr r0,SPRN_HID0
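 /*
  * The repeated reads of HID0 above follow the PPC970 requirement that
  * an mtspr to HID0 be followed by a sequence of mfspr HID0 before the
  * new value can be assumed to be in effect.
  */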
58731:
588 /* Do we have a guest vcpu to run? */
589 cmpdi r4, 0
590 beq kvmppc_primary_no_guest
591kvmppc_got_guest:
592
593 /* Load up guest SLB entries */
594 lwz r5,VCPU_SLB_MAX(r4)
595 cmpwi r5,0
596 beq 9f
597 mtctr r5
598 addi r6,r4,VCPU_SLB
5991: ld r8,VCPU_SLB_E(r6)
600 ld r9,VCPU_SLB_V(r6)
601 slbmte r9,r8
602 addi r6,r6,VCPU_SLB_SIZE
603 bdnz 1b
6049:
605 /* Increment yield count if they have a VPA */
606 ld r3, VCPU_VPA(r4)
607 cmpdi r3, 0
608 beq 25f
609 lwz r5, LPPACA_YIELDCOUNT(r3)
610 addi r5, r5, 1
611 stw r5, LPPACA_YIELDCOUNT(r3)
612 li r6, 1
613 stb r6, VCPU_VPA_DIRTY(r4)
61425:
615
616BEGIN_FTR_SECTION
617 /* Save purr/spurr */
618 mfspr r5,SPRN_PURR
619 mfspr r6,SPRN_SPURR
620 std r5,HSTATE_PURR(r13)
621 std r6,HSTATE_SPURR(r13)
622 ld r7,VCPU_PURR(r4)
623 ld r8,VCPU_SPURR(r4)
624 mtspr SPRN_PURR,r7
625 mtspr SPRN_SPURR,r8
626END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
627
Michael Neulingeee7ff92014-01-08 21:25:19 +1100628BEGIN_FTR_SECTION
Paul Mackerras89436332012-03-02 01:38:23 +0000629 /* Set partition DABR */
630 /* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
631 li r5,3
632 ld r6,VCPU_DABR(r4)
633 mtspr SPRN_DABRX,r5
634 mtspr SPRN_DABR,r6
Michael Neulingeee7ff92014-01-08 21:25:19 +1100635 BEGIN_FTR_SECTION_NESTED(89)
Paul Mackerras89436332012-03-02 01:38:23 +0000636 isync
Michael Neulingeee7ff92014-01-08 21:25:19 +1100637 END_FTR_SECTION_NESTED(CPU_FTR_ARCH_206, CPU_FTR_ARCH_206, 89)
638END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000639
640 /* Load guest PMU registers */
641 /* R4 is live here (vcpu pointer) */
642 li r3, 1
643 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
644 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
645 isync
646 lwz r3, VCPU_PMC(r4) /* always load up guest PMU registers */
647 lwz r5, VCPU_PMC + 4(r4) /* to prevent information leak */
648 lwz r6, VCPU_PMC + 8(r4)
649 lwz r7, VCPU_PMC + 12(r4)
650 lwz r8, VCPU_PMC + 16(r4)
651 lwz r9, VCPU_PMC + 20(r4)
Paul Mackerras9e368f22011-06-29 00:40:08 +0000652BEGIN_FTR_SECTION
653 lwz r10, VCPU_PMC + 24(r4)
654 lwz r11, VCPU_PMC + 28(r4)
655END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000656 mtspr SPRN_PMC1, r3
657 mtspr SPRN_PMC2, r5
658 mtspr SPRN_PMC3, r6
659 mtspr SPRN_PMC4, r7
660 mtspr SPRN_PMC5, r8
661 mtspr SPRN_PMC6, r9
Paul Mackerras9e368f22011-06-29 00:40:08 +0000662BEGIN_FTR_SECTION
663 mtspr SPRN_PMC7, r10
664 mtspr SPRN_PMC8, r11
665END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000666 ld r3, VCPU_MMCR(r4)
667 ld r5, VCPU_MMCR + 8(r4)
668 ld r6, VCPU_MMCR + 16(r4)
Paul Mackerras14941782013-09-06 13:11:18 +1000669 ld r7, VCPU_SIAR(r4)
670 ld r8, VCPU_SDAR(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000671 mtspr SPRN_MMCR1, r5
672 mtspr SPRN_MMCRA, r6
Paul Mackerras14941782013-09-06 13:11:18 +1000673 mtspr SPRN_SIAR, r7
674 mtspr SPRN_SDAR, r8
Michael Neulingb005255e2014-01-08 21:25:21 +1100675BEGIN_FTR_SECTION
676 ld r5, VCPU_MMCR + 24(r4)
677 ld r6, VCPU_SIER(r4)
678 lwz r7, VCPU_PMC + 24(r4)
679 lwz r8, VCPU_PMC + 28(r4)
680 ld r9, VCPU_MMCR + 32(r4)
681 mtspr SPRN_MMCR2, r5
682 mtspr SPRN_SIER, r6
683 mtspr SPRN_SPMC1, r7
684 mtspr SPRN_SPMC2, r8
685 mtspr SPRN_MMCRS, r9
686END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000687 mtspr SPRN_MMCR0, r3
688 isync
689
690 /* Load up FP, VMX and VSX registers */
691 bl kvmppc_load_fp
692
Michael Neulingc75df6f2012-06-25 13:33:10 +0000693 ld r14, VCPU_GPR(R14)(r4)
694 ld r15, VCPU_GPR(R15)(r4)
695 ld r16, VCPU_GPR(R16)(r4)
696 ld r17, VCPU_GPR(R17)(r4)
697 ld r18, VCPU_GPR(R18)(r4)
698 ld r19, VCPU_GPR(R19)(r4)
699 ld r20, VCPU_GPR(R20)(r4)
700 ld r21, VCPU_GPR(R21)(r4)
701 ld r22, VCPU_GPR(R22)(r4)
702 ld r23, VCPU_GPR(R23)(r4)
703 ld r24, VCPU_GPR(R24)(r4)
704 ld r25, VCPU_GPR(R25)(r4)
705 ld r26, VCPU_GPR(R26)(r4)
706 ld r27, VCPU_GPR(R27)(r4)
707 ld r28, VCPU_GPR(R28)(r4)
708 ld r29, VCPU_GPR(R29)(r4)
709 ld r30, VCPU_GPR(R30)(r4)
710 ld r31, VCPU_GPR(R31)(r4)
Paul Mackerras89436332012-03-02 01:38:23 +0000711
Paul Mackerras9e368f22011-06-29 00:40:08 +0000712BEGIN_FTR_SECTION
Paul Mackerrasde56a942011-06-29 00:21:34 +0000713 /* Switch DSCR to guest value */
714 ld r5, VCPU_DSCR(r4)
715 mtspr SPRN_DSCR, r5
Paul Mackerras9e368f22011-06-29 00:40:08 +0000716END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000717
Michael Neulingb005255e2014-01-08 21:25:21 +1100718BEGIN_FTR_SECTION
719 /* Skip next section on POWER7 or PPC970 */
720 b 8f
721END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
722 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
723 mfmsr r8
724 li r0, 1
725 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
726 mtmsrd r8
727
728 /* Load up POWER8-specific registers */
729 ld r5, VCPU_IAMR(r4)
730 lwz r6, VCPU_PSPB(r4)
731 ld r7, VCPU_FSCR(r4)
732 mtspr SPRN_IAMR, r5
733 mtspr SPRN_PSPB, r6
734 mtspr SPRN_FSCR, r7
735 ld r5, VCPU_DAWR(r4)
736 ld r6, VCPU_DAWRX(r4)
737 ld r7, VCPU_CIABR(r4)
738 ld r8, VCPU_TAR(r4)
739 mtspr SPRN_DAWR, r5
740 mtspr SPRN_DAWRX, r6
741 mtspr SPRN_CIABR, r7
742 mtspr SPRN_TAR, r8
743 ld r5, VCPU_IC(r4)
744 ld r6, VCPU_VTB(r4)
745 mtspr SPRN_IC, r5
746 mtspr SPRN_VTB, r6
747 ld r5, VCPU_TFHAR(r4)
748 ld r6, VCPU_TFIAR(r4)
749 ld r7, VCPU_TEXASR(r4)
750 ld r8, VCPU_EBBHR(r4)
751 mtspr SPRN_TFHAR, r5
752 mtspr SPRN_TFIAR, r6
753 mtspr SPRN_TEXASR, r7
754 mtspr SPRN_EBBHR, r8
755 ld r5, VCPU_EBBRR(r4)
756 ld r6, VCPU_BESCR(r4)
757 ld r7, VCPU_CSIGR(r4)
758 ld r8, VCPU_TACR(r4)
759 mtspr SPRN_EBBRR, r5
760 mtspr SPRN_BESCR, r6
761 mtspr SPRN_CSIGR, r7
762 mtspr SPRN_TACR, r8
763 ld r5, VCPU_TCSCR(r4)
764 ld r6, VCPU_ACOP(r4)
765 lwz r7, VCPU_GUEST_PID(r4)
766 ld r8, VCPU_WORT(r4)
767 mtspr SPRN_TCSCR, r5
768 mtspr SPRN_ACOP, r6
769 mtspr SPRN_PID, r7
770 mtspr SPRN_WORT, r8
7718:
772
Paul Mackerrasde56a942011-06-29 00:21:34 +0000773 /*
774 * Set the decrementer to the guest decrementer.
775 */
776 ld r8,VCPU_DEC_EXPIRES(r4)
777 mftb r7
778 subf r3,r7,r8
779 mtspr SPRN_DEC,r3
780 stw r3,VCPU_DEC(r4)
781
782 ld r5, VCPU_SPRG0(r4)
783 ld r6, VCPU_SPRG1(r4)
784 ld r7, VCPU_SPRG2(r4)
785 ld r8, VCPU_SPRG3(r4)
786 mtspr SPRN_SPRG0, r5
787 mtspr SPRN_SPRG1, r6
788 mtspr SPRN_SPRG2, r7
789 mtspr SPRN_SPRG3, r8
790
Paul Mackerrasde56a942011-06-29 00:21:34 +0000791 /* Load up DAR and DSISR */
792 ld r5, VCPU_DAR(r4)
793 lwz r6, VCPU_DSISR(r4)
794 mtspr SPRN_DAR, r5
795 mtspr SPRN_DSISR, r6
796
Paul Mackerras9e368f22011-06-29 00:40:08 +0000797BEGIN_FTR_SECTION
Paul Mackerrasde56a942011-06-29 00:21:34 +0000798 /* Restore AMR and UAMOR, set AMOR to all 1s */
799 ld r5,VCPU_AMR(r4)
800 ld r6,VCPU_UAMOR(r4)
801 li r7,-1
802 mtspr SPRN_AMR,r5
803 mtspr SPRN_UAMOR,r6
804 mtspr SPRN_AMOR,r7
Paul Mackerras9e368f22011-06-29 00:40:08 +0000805END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000806
Paul Mackerrasde56a942011-06-29 00:21:34 +0000807 /* Restore state of CTRL run bit; assume 1 on entry */
808 lwz r5,VCPU_CTRL(r4)
809 andi. r5,r5,1
810 bne 4f
811 mfspr r6,SPRN_CTRLF
812 clrrdi r6,r6,1
813 mtspr SPRN_CTRLT,r6
8144:
815 ld r6, VCPU_CTR(r4)
816 lwz r7, VCPU_XER(r4)
817
818 mtctr r6
819 mtxer r7
820
Paul Mackerras4619ac82013-04-17 20:31:41 +0000821 ld r10, VCPU_PC(r4)
822 ld r11, VCPU_MSR(r4)
Paul Mackerras19ccb762011-07-23 17:42:46 +1000823kvmppc_cede_reentry: /* r4 = vcpu, r13 = paca */
Paul Mackerrasde56a942011-06-29 00:21:34 +0000824 ld r6, VCPU_SRR0(r4)
825 ld r7, VCPU_SRR1(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000826
Paul Mackerras4619ac82013-04-17 20:31:41 +0000827 /* r11 = vcpu->arch.msr & ~MSR_HV */
Paul Mackerrasde56a942011-06-29 00:21:34 +0000828 rldicl r11, r11, 63 - MSR_HV_LG, 1
829 rotldi r11, r11, 1 + MSR_HV_LG
830 ori r11, r11, MSR_ME
831
Paul Mackerras19ccb762011-07-23 17:42:46 +1000832 /* Check if we can deliver an external or decrementer interrupt now */
833 ld r0,VCPU_PENDING_EXC(r4)
Paul Mackerras4619ac82013-04-17 20:31:41 +0000834 lis r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
Paul Mackerras19ccb762011-07-23 17:42:46 +1000835 and r0,r0,r8
836 cmpdi cr1,r0,0
837 andi. r0,r11,MSR_EE
838 beq cr1,11f
839BEGIN_FTR_SECTION
840 mfspr r8,SPRN_LPCR
841 ori r8,r8,LPCR_MER
842 mtspr SPRN_LPCR,r8
843 isync
844END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
845 beq 5f
846 li r0,BOOK3S_INTERRUPT_EXTERNAL
84712: mr r6,r10
848 mr r10,r0
849 mr r7,r11
850 li r11,(MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
851 rotldi r11,r11,63
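 /*
  * (MSR_ME << 1) | 1 rotated right by one bit (rotldi by 63) moves the
  * low-order 1 into the top bit (MSR_SF) and puts MSR_ME back in its
  * normal position, yielding MSR_SF | MSR_ME without a 64-bit constant.
  */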
852 b 5f
85311: beq 5f
854 mfspr r0,SPRN_DEC
855 cmpwi r0,0
856 li r0,BOOK3S_INTERRUPT_DECREMENTER
857 blt 12b
858
859 /* Move SRR0 and SRR1 into the respective regs */
8605: mtspr SPRN_SRR0, r6
861 mtspr SPRN_SRR1, r7
Paul Mackerras19ccb762011-07-23 17:42:46 +1000862
Liu Ping Fan27025a62013-11-19 14:12:48 +0800863/*
864 * Required state:
865 * R4 = vcpu
866 * R10: value for HSRR0
867 * R11: value for HSRR1
868 * R13 = PACA
869 */
Paul Mackerrasde56a942011-06-29 00:21:34 +0000870fast_guest_return:
Paul Mackerras4619ac82013-04-17 20:31:41 +0000871 li r0,0
872 stb r0,VCPU_CEDED(r4) /* cancel cede */
Paul Mackerrasde56a942011-06-29 00:21:34 +0000873 mtspr SPRN_HSRR0,r10
874 mtspr SPRN_HSRR1,r11
875
876 /* Activate guest mode, so faults get handled by KVM */
Paul Mackerras44a3add2013-10-04 21:45:04 +1000877 li r9, KVM_GUEST_MODE_GUEST_HV
Paul Mackerrasde56a942011-06-29 00:21:34 +0000878 stb r9, HSTATE_IN_GUEST(r13)
879
880 /* Enter guest */
881
Paul Mackerras0acb9112013-02-04 18:10:51 +0000882BEGIN_FTR_SECTION
883 ld r5, VCPU_CFAR(r4)
884 mtspr SPRN_CFAR, r5
885END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
Paul Mackerras4b8473c2013-09-20 14:52:39 +1000886BEGIN_FTR_SECTION
887 ld r0, VCPU_PPR(r4)
888END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Paul Mackerras0acb9112013-02-04 18:10:51 +0000889
Paul Mackerrasde56a942011-06-29 00:21:34 +0000890 ld r5, VCPU_LR(r4)
891 lwz r6, VCPU_CR(r4)
892 mtlr r5
893 mtcr r6
894
Michael Neulingc75df6f2012-06-25 13:33:10 +0000895 ld r1, VCPU_GPR(R1)(r4)
896 ld r2, VCPU_GPR(R2)(r4)
897 ld r3, VCPU_GPR(R3)(r4)
898 ld r5, VCPU_GPR(R5)(r4)
899 ld r6, VCPU_GPR(R6)(r4)
900 ld r7, VCPU_GPR(R7)(r4)
901 ld r8, VCPU_GPR(R8)(r4)
902 ld r9, VCPU_GPR(R9)(r4)
903 ld r10, VCPU_GPR(R10)(r4)
904 ld r11, VCPU_GPR(R11)(r4)
905 ld r12, VCPU_GPR(R12)(r4)
906 ld r13, VCPU_GPR(R13)(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000907
Paul Mackerras4b8473c2013-09-20 14:52:39 +1000908BEGIN_FTR_SECTION
909 mtspr SPRN_PPR, r0
910END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
911 ld r0, VCPU_GPR(R0)(r4)
Michael Neulingc75df6f2012-06-25 13:33:10 +0000912 ld r4, VCPU_GPR(R4)(r4)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000913
914 hrfid
915 b .
916
917/******************************************************************************
918 * *
919 * Exit code *
920 * *
921 *****************************************************************************/
922
923/*
924 * We come here from the first-level interrupt handlers.
925 */
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +0530926 .globl kvmppc_interrupt_hv
927kvmppc_interrupt_hv:
Paul Mackerrasde56a942011-06-29 00:21:34 +0000928 /*
929 * Register contents:
930 * R12 = interrupt vector
931 * R13 = PACA
932 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
933 * guest R13 saved in SPRN_SCRATCH0
934 */
935 /* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
936 std r9, HSTATE_HOST_R2(r13)
Paul Mackerras44a3add2013-10-04 21:45:04 +1000937
938 lbz r9, HSTATE_IN_GUEST(r13)
939 cmpwi r9, KVM_GUEST_MODE_HOST_HV
940 beq kvmppc_bad_host_intr
Aneesh Kumar K.Vdd96b2c2013-10-07 22:17:55 +0530941#ifdef CONFIG_KVM_BOOK3S_PR_POSSIBLE
942 cmpwi r9, KVM_GUEST_MODE_GUEST
943 ld r9, HSTATE_HOST_R2(r13)
944 beq kvmppc_interrupt_pr
945#endif
Paul Mackerras44a3add2013-10-04 21:45:04 +1000946 /* We're now back in the host but in guest MMU context */
947 li r9, KVM_GUEST_MODE_HOST_HV
948 stb r9, HSTATE_IN_GUEST(r13)
949
Paul Mackerrasde56a942011-06-29 00:21:34 +0000950 ld r9, HSTATE_KVM_VCPU(r13)
951
952 /* Save registers */
953
Michael Neulingc75df6f2012-06-25 13:33:10 +0000954 std r0, VCPU_GPR(R0)(r9)
955 std r1, VCPU_GPR(R1)(r9)
956 std r2, VCPU_GPR(R2)(r9)
957 std r3, VCPU_GPR(R3)(r9)
958 std r4, VCPU_GPR(R4)(r9)
959 std r5, VCPU_GPR(R5)(r9)
960 std r6, VCPU_GPR(R6)(r9)
961 std r7, VCPU_GPR(R7)(r9)
962 std r8, VCPU_GPR(R8)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000963 ld r0, HSTATE_HOST_R2(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +0000964 std r0, VCPU_GPR(R9)(r9)
965 std r10, VCPU_GPR(R10)(r9)
966 std r11, VCPU_GPR(R11)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000967 ld r3, HSTATE_SCRATCH0(r13)
968 lwz r4, HSTATE_SCRATCH1(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +0000969 std r3, VCPU_GPR(R12)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000970 stw r4, VCPU_CR(r9)
Paul Mackerras0acb9112013-02-04 18:10:51 +0000971BEGIN_FTR_SECTION
972 ld r3, HSTATE_CFAR(r13)
973 std r3, VCPU_CFAR(r9)
974END_FTR_SECTION_IFSET(CPU_FTR_CFAR)
Paul Mackerras4b8473c2013-09-20 14:52:39 +1000975BEGIN_FTR_SECTION
976 ld r4, HSTATE_PPR(r13)
977 std r4, VCPU_PPR(r9)
978END_FTR_SECTION_IFSET(CPU_FTR_HAS_PPR)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000979
980 /* Restore R1/R2 so we can handle faults */
981 ld r1, HSTATE_HOST_R1(r13)
982 ld r2, PACATOC(r13)
983
984 mfspr r10, SPRN_SRR0
985 mfspr r11, SPRN_SRR1
986 std r10, VCPU_SRR0(r9)
987 std r11, VCPU_SRR1(r9)
988 andi. r0, r12, 2 /* need to read HSRR0/1? */
989 beq 1f
990 mfspr r10, SPRN_HSRR0
991 mfspr r11, SPRN_HSRR1
992 clrrdi r12, r12, 2
9931: std r10, VCPU_PC(r9)
994 std r11, VCPU_MSR(r9)
995
996 GET_SCRATCH0(r3)
997 mflr r4
Michael Neulingc75df6f2012-06-25 13:33:10 +0000998 std r3, VCPU_GPR(R13)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000999 std r4, VCPU_LR(r9)
1000
Paul Mackerrasde56a942011-06-29 00:21:34 +00001001 stw r12,VCPU_TRAP(r9)
1002
Paul Mackerras697d3892011-12-12 12:36:37 +00001003 /* Save HEIR (HV emulation assist reg) in last_inst
1004 if this is an HEI (HV emulation interrupt, e40) */
1005 li r3,KVM_INST_FETCH_FAILED
1006BEGIN_FTR_SECTION
1007 cmpwi r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
1008 bne 11f
1009 mfspr r3,SPRN_HEIR
1010END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
101111: stw r3,VCPU_LAST_INST(r9)
1012
1013 /* these are volatile across C function calls */
1014 mfctr r3
1015 mfxer r4
1016 std r3, VCPU_CTR(r9)
1017 stw r4, VCPU_XER(r9)
1018
1019BEGIN_FTR_SECTION
1020 /* If this is a page table miss then see if it's theirs or ours */
1021 cmpwi r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1022 beq kvmppc_hdsi
Paul Mackerras342d3db2011-12-12 12:38:05 +00001023 cmpwi r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1024 beq kvmppc_hisi
Paul Mackerras697d3892011-12-12 12:36:37 +00001025END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
1026
Paul Mackerrasde56a942011-06-29 00:21:34 +00001027 /* See if this is a leftover HDEC interrupt */
1028 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1029 bne 2f
1030 mfspr r3,SPRN_HDEC
1031 cmpwi r3,0
1032 bge ignore_hdec
10332:
Paul Mackerras697d3892011-12-12 12:36:37 +00001034 /* See if this is an hcall we can handle in real mode */
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001035 cmpwi r12,BOOK3S_INTERRUPT_SYSCALL
1036 beq hcall_try_real_mode
Paul Mackerrasde56a942011-06-29 00:21:34 +00001037
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001038 /* Only handle external interrupts here on arch 206 and later */
Paul Mackerras9e368f22011-06-29 00:40:08 +00001039BEGIN_FTR_SECTION
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001040 b ext_interrupt_to_host
1041END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1042
1043 /* External interrupt ? */
1044 cmpwi r12, BOOK3S_INTERRUPT_EXTERNAL
1045 bne+ ext_interrupt_to_host
1046
1047 /* External interrupt, first check for host_ipi. If this is
1048 * set, we know the host wants us out so let's do it now
1049 */
Paul Mackerras4619ac82013-04-17 20:31:41 +00001050do_ext_interrupt:
Paul Mackerrasc9342432013-09-06 13:24:13 +10001051 bl kvmppc_read_intr
1052 cmpdi r3, 0
1053 bgt ext_interrupt_to_host
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001054
 1055 /* All right, looks like an IPI for the guest, we need to set MER */
Paul Mackerras4619ac82013-04-17 20:31:41 +00001056 /* Check if any CPU is heading out to the host, if so head out too */
1057 ld r5, HSTATE_KVM_VCORE(r13)
1058 lwz r0, VCORE_ENTRY_EXIT(r5)
1059 cmpwi r0, 0x100
1060 bge ext_interrupt_to_host
1061
1062 /* See if there is a pending interrupt for the guest */
1063 mfspr r8, SPRN_LPCR
1064 ld r0, VCPU_PENDING_EXC(r9)
1065 /* Insert EXTERNAL_LEVEL bit into LPCR at the MER bit position */
1066 rldicl. r0, r0, 64 - BOOK3S_IRQPRIO_EXTERNAL_LEVEL, 63
1067 rldimi r8, r0, LPCR_MER_SH, 63 - LPCR_MER_SH
1068 beq 2f
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001069
1070 /* And if the guest EE is set, we can deliver immediately, else
1071 * we return to the guest with MER set
1072 */
1073 andi. r0, r11, MSR_EE
Paul Mackerras4619ac82013-04-17 20:31:41 +00001074 beq 2f
1075 mtspr SPRN_SRR0, r10
1076 mtspr SPRN_SRR1, r11
1077 li r10, BOOK3S_INTERRUPT_EXTERNAL
1078 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
1079 rotldi r11, r11, 63
10802: mr r4, r9
1081 mtspr SPRN_LPCR, r8
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001082 b fast_guest_return
1083
Benjamin Herrenschmidt54695c32013-04-17 20:30:50 +00001084ext_interrupt_to_host:
Paul Mackerrasde56a942011-06-29 00:21:34 +00001085
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001086guest_exit_cont: /* r9 = vcpu, r12 = trap, r13 = paca */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001087 /* Save more register state */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001088 mfdar r6
1089 mfdsisr r7
Paul Mackerrasde56a942011-06-29 00:21:34 +00001090 std r6, VCPU_DAR(r9)
1091 stw r7, VCPU_DSISR(r9)
Paul Mackerras9e368f22011-06-29 00:40:08 +00001092BEGIN_FTR_SECTION
Paul Mackerras697d3892011-12-12 12:36:37 +00001093 /* don't overwrite fault_dar/fault_dsisr if HDSI */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001094 cmpwi r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
1095 beq 6f
Paul Mackerras9e368f22011-06-29 00:40:08 +00001096END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
Paul Mackerras697d3892011-12-12 12:36:37 +00001097 std r6, VCPU_FAULT_DAR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001098 stw r7, VCPU_FAULT_DSISR(r9)
1099
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001100 /* See if it is a machine check */
1101 cmpwi r12, BOOK3S_INTERRUPT_MACHINE_CHECK
1102 beq machine_check_realmode
1103mc_cont:
1104
Paul Mackerrasde56a942011-06-29 00:21:34 +00001105 /* Save guest CTRL register, set runlatch to 1 */
Paul Mackerras697d3892011-12-12 12:36:37 +000011066: mfspr r6,SPRN_CTRLF
Paul Mackerrasde56a942011-06-29 00:21:34 +00001107 stw r6,VCPU_CTRL(r9)
1108 andi. r0,r6,1
1109 bne 4f
1110 ori r6,r6,1
1111 mtspr SPRN_CTRLT,r6
11124:
1113 /* Read the guest SLB and save it away */
1114 lwz r0,VCPU_SLB_NR(r9) /* number of entries in SLB */
1115 mtctr r0
1116 li r6,0
1117 addi r7,r9,VCPU_SLB
1118 li r5,0
11191: slbmfee r8,r6
1120 andis. r0,r8,SLB_ESID_V@h
1121 beq 2f
1122 add r8,r8,r6 /* put index in */
1123 slbmfev r3,r6
1124 std r8,VCPU_SLB_E(r7)
1125 std r3,VCPU_SLB_V(r7)
1126 addi r7,r7,VCPU_SLB_SIZE
1127 addi r5,r5,1
11282: addi r6,r6,1
1129 bdnz 1b
1130 stw r5,VCPU_SLB_MAX(r9)
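 /*
  * slbmfee/slbmfev read the ESID and VSID halves of SLB entry r6; only
  * valid entries are saved, and the entry index is added into the ESID
  * word so that slbmte can later restore each entry to the same slot.
  */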
1131
1132 /*
1133 * Save the guest PURR/SPURR
1134 */
Paul Mackerras9e368f22011-06-29 00:40:08 +00001135BEGIN_FTR_SECTION
Paul Mackerrasde56a942011-06-29 00:21:34 +00001136 mfspr r5,SPRN_PURR
1137 mfspr r6,SPRN_SPURR
1138 ld r7,VCPU_PURR(r9)
1139 ld r8,VCPU_SPURR(r9)
1140 std r5,VCPU_PURR(r9)
1141 std r6,VCPU_SPURR(r9)
1142 subf r5,r7,r5
1143 subf r6,r8,r6
1144
1145 /*
1146 * Restore host PURR/SPURR and add guest times
1147 * so that the time in the guest gets accounted.
1148 */
1149 ld r3,HSTATE_PURR(r13)
1150 ld r4,HSTATE_SPURR(r13)
1151 add r3,r3,r5
1152 add r4,r4,r6
1153 mtspr SPRN_PURR,r3
1154 mtspr SPRN_SPURR,r4
Paul Mackerras9e368f22011-06-29 00:40:08 +00001155END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001156
Paul Mackerras93b0f4d2013-09-06 13:17:46 +10001157 /* Save DEC */
1158 mfspr r5,SPRN_DEC
1159 mftb r6
1160 extsw r5,r5
1161 add r5,r5,r6
1162 std r5,VCPU_DEC_EXPIRES(r9)
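 /*
  * The guest decrementer is saved as an absolute timebase value
  * (now + remaining ticks); DEC is sign-extended first since it may
  * already be negative if the guest's decrementer has expired.
  */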
1163
Michael Neulingb005255e2014-01-08 21:25:21 +11001164BEGIN_FTR_SECTION
1165 b 8f
1166END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_207S)
1167 /* Turn on TM so we can access TFHAR/TFIAR/TEXASR */
1168 mfmsr r8
1169 li r0, 1
1170 rldimi r8, r0, MSR_TM_LG, 63-MSR_TM_LG
1171 mtmsrd r8
1172
1173 /* Save POWER8-specific registers */
1174 mfspr r5, SPRN_IAMR
1175 mfspr r6, SPRN_PSPB
1176 mfspr r7, SPRN_FSCR
1177 std r5, VCPU_IAMR(r9)
1178 stw r6, VCPU_PSPB(r9)
1179 std r7, VCPU_FSCR(r9)
1180 mfspr r5, SPRN_IC
1181 mfspr r6, SPRN_VTB
1182 mfspr r7, SPRN_TAR
1183 std r5, VCPU_IC(r9)
1184 std r6, VCPU_VTB(r9)
1185 std r7, VCPU_TAR(r9)
1186 mfspr r5, SPRN_TFHAR
1187 mfspr r6, SPRN_TFIAR
1188 mfspr r7, SPRN_TEXASR
1189 mfspr r8, SPRN_EBBHR
1190 std r5, VCPU_TFHAR(r9)
1191 std r6, VCPU_TFIAR(r9)
1192 std r7, VCPU_TEXASR(r9)
1193 std r8, VCPU_EBBHR(r9)
1194 mfspr r5, SPRN_EBBRR
1195 mfspr r6, SPRN_BESCR
1196 mfspr r7, SPRN_CSIGR
1197 mfspr r8, SPRN_TACR
1198 std r5, VCPU_EBBRR(r9)
1199 std r6, VCPU_BESCR(r9)
1200 std r7, VCPU_CSIGR(r9)
1201 std r8, VCPU_TACR(r9)
1202 mfspr r5, SPRN_TCSCR
1203 mfspr r6, SPRN_ACOP
1204 mfspr r7, SPRN_PID
1205 mfspr r8, SPRN_WORT
1206 std r5, VCPU_TCSCR(r9)
1207 std r6, VCPU_ACOP(r9)
1208 stw r7, VCPU_GUEST_PID(r9)
1209 std r8, VCPU_WORT(r9)
12108:
1211
Paul Mackerrasde56a942011-06-29 00:21:34 +00001212 /* Save and reset AMR and UAMOR before turning on the MMU */
Paul Mackerras9e368f22011-06-29 00:40:08 +00001213BEGIN_FTR_SECTION
Paul Mackerrasde56a942011-06-29 00:21:34 +00001214 mfspr r5,SPRN_AMR
1215 mfspr r6,SPRN_UAMOR
1216 std r5,VCPU_AMR(r9)
1217 std r6,VCPU_UAMOR(r9)
1218 li r6,0
1219 mtspr SPRN_AMR,r6
Paul Mackerras9e368f22011-06-29 00:40:08 +00001220END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001221
Paul Mackerrasde56a942011-06-29 00:21:34 +00001222 /* Switch DSCR back to host value */
Paul Mackerras9e368f22011-06-29 00:40:08 +00001223BEGIN_FTR_SECTION
Paul Mackerrasde56a942011-06-29 00:21:34 +00001224 mfspr r8, SPRN_DSCR
1225 ld r7, HSTATE_DSCR(r13)
Paul Mackerrascfc86022013-09-21 09:53:28 +10001226 std r8, VCPU_DSCR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001227 mtspr SPRN_DSCR, r7
Paul Mackerras9e368f22011-06-29 00:40:08 +00001228END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001229
1230 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001231 std r14, VCPU_GPR(R14)(r9)
1232 std r15, VCPU_GPR(R15)(r9)
1233 std r16, VCPU_GPR(R16)(r9)
1234 std r17, VCPU_GPR(R17)(r9)
1235 std r18, VCPU_GPR(R18)(r9)
1236 std r19, VCPU_GPR(R19)(r9)
1237 std r20, VCPU_GPR(R20)(r9)
1238 std r21, VCPU_GPR(R21)(r9)
1239 std r22, VCPU_GPR(R22)(r9)
1240 std r23, VCPU_GPR(R23)(r9)
1241 std r24, VCPU_GPR(R24)(r9)
1242 std r25, VCPU_GPR(R25)(r9)
1243 std r26, VCPU_GPR(R26)(r9)
1244 std r27, VCPU_GPR(R27)(r9)
1245 std r28, VCPU_GPR(R28)(r9)
1246 std r29, VCPU_GPR(R29)(r9)
1247 std r30, VCPU_GPR(R30)(r9)
1248 std r31, VCPU_GPR(R31)(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001249
1250 /* Save SPRGs */
1251 mfspr r3, SPRN_SPRG0
1252 mfspr r4, SPRN_SPRG1
1253 mfspr r5, SPRN_SPRG2
1254 mfspr r6, SPRN_SPRG3
1255 std r3, VCPU_SPRG0(r9)
1256 std r4, VCPU_SPRG1(r9)
1257 std r5, VCPU_SPRG2(r9)
1258 std r6, VCPU_SPRG3(r9)
1259
Paul Mackerras89436332012-03-02 01:38:23 +00001260 /* save FP state */
1261 mr r3, r9
Paul Mackerras595e4f72013-10-15 20:43:04 +11001262 bl kvmppc_save_fp
Paul Mackerras89436332012-03-02 01:38:23 +00001263
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001264 /* Increment yield count if they have a VPA */
1265 ld r8, VCPU_VPA(r9) /* do they have a VPA? */
1266 cmpdi r8, 0
1267 beq 25f
1268 lwz r3, LPPACA_YIELDCOUNT(r8)
1269 addi r3, r3, 1
1270 stw r3, LPPACA_YIELDCOUNT(r8)
Paul Mackerrasc35635e2013-04-18 19:51:04 +00001271 li r3, 1
1272 stb r3, VCPU_VPA_DIRTY(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +0000127325:
1274 /* Save PMU registers if requested */
1275 /* r8 and cr0.eq are live here */
Paul Mackerrasde56a942011-06-29 00:21:34 +00001276 li r3, 1
1277 sldi r3, r3, 31 /* MMCR0_FC (freeze counters) bit */
1278 mfspr r4, SPRN_MMCR0 /* save MMCR0 */
1279 mtspr SPRN_MMCR0, r3 /* freeze all counters, disable ints */
Paul Mackerras89436332012-03-02 01:38:23 +00001280 mfspr r6, SPRN_MMCRA
1281BEGIN_FTR_SECTION
1282 /* On P7, clear MMCRA in order to disable SDAR updates */
1283 li r7, 0
1284 mtspr SPRN_MMCRA, r7
1285END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001286 isync
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001287 beq 21f /* if no VPA, save PMU stuff anyway */
1288 lbz r7, LPPACA_PMCINUSE(r8)
1289 cmpwi r7, 0 /* did they ask for PMU stuff to be saved? */
1290 bne 21f
1291 std r3, VCPU_MMCR(r9) /* if not, set saved MMCR0 to FC */
1292 b 22f
129321: mfspr r5, SPRN_MMCR1
Paul Mackerras14941782013-09-06 13:11:18 +10001294 mfspr r7, SPRN_SIAR
1295 mfspr r8, SPRN_SDAR
Paul Mackerrasde56a942011-06-29 00:21:34 +00001296 std r4, VCPU_MMCR(r9)
1297 std r5, VCPU_MMCR + 8(r9)
1298 std r6, VCPU_MMCR + 16(r9)
Paul Mackerras14941782013-09-06 13:11:18 +10001299 std r7, VCPU_SIAR(r9)
1300 std r8, VCPU_SDAR(r9)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001301 mfspr r3, SPRN_PMC1
1302 mfspr r4, SPRN_PMC2
1303 mfspr r5, SPRN_PMC3
1304 mfspr r6, SPRN_PMC4
1305 mfspr r7, SPRN_PMC5
1306 mfspr r8, SPRN_PMC6
Paul Mackerras9e368f22011-06-29 00:40:08 +00001307BEGIN_FTR_SECTION
1308 mfspr r10, SPRN_PMC7
1309 mfspr r11, SPRN_PMC8
1310END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001311 stw r3, VCPU_PMC(r9)
1312 stw r4, VCPU_PMC + 4(r9)
1313 stw r5, VCPU_PMC + 8(r9)
1314 stw r6, VCPU_PMC + 12(r9)
1315 stw r7, VCPU_PMC + 16(r9)
1316 stw r8, VCPU_PMC + 20(r9)
Paul Mackerras9e368f22011-06-29 00:40:08 +00001317BEGIN_FTR_SECTION
1318 stw r10, VCPU_PMC + 24(r9)
1319 stw r11, VCPU_PMC + 28(r9)
1320END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
Michael Neulingb005255e2014-01-08 21:25:21 +11001321BEGIN_FTR_SECTION
1322 mfspr r4, SPRN_MMCR2
1323 mfspr r5, SPRN_SIER
1324 mfspr r6, SPRN_SPMC1
1325 mfspr r7, SPRN_SPMC2
1326 mfspr r8, SPRN_MMCRS
1327 std r4, VCPU_MMCR + 24(r9)
1328 std r5, VCPU_SIER(r9)
1329 stw r6, VCPU_PMC + 24(r9)
1330 stw r7, VCPU_PMC + 28(r9)
1331 std r8, VCPU_MMCR + 32(r9)
1332 lis r4, 0x8000
1333 mtspr SPRN_MMCRS, r4
1334END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasde56a942011-06-29 00:21:34 +0000133522:
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001336 /* Clear out SLB */
1337 li r5,0
1338 slbmte r5,r5
1339 slbia
1340 ptesync
1341
1342hdec_soon: /* r12 = trap, r13 = paca */
1343BEGIN_FTR_SECTION
1344 b 32f
1345END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
1346 /*
1347 * POWER7 guest -> host partition switch code.
1348 * We don't have to lock against tlbies but we do
1349 * have to coordinate the hardware threads.
1350 */
1351 /* Increment the threads-exiting-guest count in the 0xff00
1352 bits of vcore->entry_exit_count */
1353 lwsync
Paul Mackerras218309b2013-09-06 13:23:44 +10001354 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001355 addi r6,r5,VCORE_ENTRY_EXIT
135641: lwarx r3,0,r6
1357 addi r0,r3,0x100
1358 stwcx. r0,0,r6
1359 bne 41b
1360 lwsync
1361
1362 /*
1363 * At this point we have an interrupt that we have to pass
1364 * up to the kernel or qemu; we can't handle it in real mode.
1365 * Thus we have to do a partition switch, so we have to
1366 * collect the other threads, if we are the first thread
1367 * to take an interrupt. To do this, we set the HDEC to 0,
1368 * which causes an HDEC interrupt in all threads within 2ns
1369 * because the HDEC register is shared between all 4 threads.
1370 * However, we don't need to bother if this is an HDEC
1371 * interrupt, since the other threads will already be on their
1372 * way here in that case.
1373 */
1374 cmpwi r3,0x100 /* Are we the first here? */
1375 bge 43f
1376 cmpwi r12,BOOK3S_INTERRUPT_HV_DECREMENTER
1377 beq 40f
1378 li r0,0
1379 mtspr SPRN_HDEC,r0
138040:
1381 /*
1382 * Send an IPI to any napping threads, since an HDEC interrupt
1383 * doesn't wake CPUs up from nap.
1384 */
1385 lwz r3,VCORE_NAPPING_THREADS(r5)
1386 lbz r4,HSTATE_PTID(r13)
1387 li r0,1
1388 sld r0,r0,r4
1389 andc. r3,r3,r0 /* no sense IPI'ing ourselves */
1390 beq 43f
1391 mulli r4,r4,PACA_SIZE /* get paca for thread 0 */
1392 subf r6,r4,r13
139342: andi. r0,r3,1
1394 beq 44f
1395 ld r8,HSTATE_XICS_PHYS(r6) /* get thread's XICS reg addr */
1396 li r0,IPI_PRIORITY
1397 li r7,XICS_MFRR
1398 stbcix r0,r7,r8 /* trigger the IPI */
139944: srdi. r3,r3,1
1400 addi r6,r6,PACA_SIZE
1401 bne 42b
1402
1403secondary_too_late:
1404 /* Secondary threads wait for primary to do partition switch */
140543: ld r5,HSTATE_KVM_VCORE(r13)
1406 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1407 lbz r3,HSTATE_PTID(r13)
1408 cmpwi r3,0
1409 beq 15f
Paul Mackerras218309b2013-09-06 13:23:44 +10001410 HMT_LOW
141113: lbz r3,VCORE_IN_GUEST(r5)
1412 cmpwi r3,0
1413 bne 13b
1414 HMT_MEDIUM
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001415 b 16f
1416
1417 /* Primary thread waits for all the secondaries to exit guest */
141815: lwz r3,VCORE_ENTRY_EXIT(r5)
1419 srwi r0,r3,8
1420 clrldi r3,r3,56
1421 cmpw r3,r0
1422 bne 15b
1423 isync
1424
1425 /* Primary thread switches back to host partition */
1426 ld r6,KVM_HOST_SDR1(r4)
1427 lwz r7,KVM_HOST_LPID(r4)
1428 li r8,LPID_RSVD /* switch to reserved LPID */
1429 mtspr SPRN_LPID,r8
1430 ptesync
1431 mtspr SPRN_SDR1,r6 /* switch to partition page table */
1432 mtspr SPRN_LPID,r7
1433 isync
1434
Michael Neulingb005255e2014-01-08 21:25:21 +11001435BEGIN_FTR_SECTION
1436 /* DPDES is shared between threads */
1437 mfspr r7, SPRN_DPDES
1438 std r7, VCORE_DPDES(r5)
1439 /* clear DPDES so we don't get guest doorbells in the host */
1440 li r8, 0
1441 mtspr SPRN_DPDES, r8
1442END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
1443
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001444 /* Subtract timebase offset from timebase */
1445 ld r8,VCORE_TB_OFFSET(r5)
1446 cmpdi r8,0
1447 beq 17f
1448 mftb r6 /* current host timebase */
1449 subf r8,r8,r6
1450 mtspr SPRN_TBU40,r8 /* update upper 40 bits */
1451 mftb r7 /* check if lower 24 bits overflowed */
1452 clrldi r6,r6,40
1453 clrldi r7,r7,40
1454 cmpld r7,r6
1455 bge 17f
1456 addis r8,r8,0x100 /* if so, increment upper 40 bits */
1457 mtspr SPRN_TBU40,r8
1458
1459 /* Reset PCR */
146017: ld r0, VCORE_PCR(r5)
1461 cmpdi r0, 0
1462 beq 18f
1463 li r0, 0
1464 mtspr SPRN_PCR, r0
146518:
1466 /* Signal secondary CPUs to continue */
1467 stb r0,VCORE_IN_GUEST(r5)
1468 lis r8,0x7fff /* MAX_INT@h */
1469 mtspr SPRN_HDEC,r8
1470
147116: ld r8,KVM_HOST_LPCR(r4)
1472 mtspr SPRN_LPCR,r8
1473 isync
1474 b 33f
1475
1476 /*
1477 * PPC970 guest -> host partition switch code.
1478 * We have to lock against concurrent tlbies, and
1479 * we have to flush the whole TLB.
1480 */
148132: ld r5,HSTATE_KVM_VCORE(r13)
1482 ld r4,VCORE_KVM(r5) /* pointer to struct kvm */
1483
1484 /* Take the guest's tlbie_lock */
1485#ifdef __BIG_ENDIAN__
1486 lwz r8,PACA_LOCK_TOKEN(r13)
1487#else
1488 lwz r8,PACAPACAINDEX(r13)
1489#endif
1490 addi r3,r4,KVM_TLBIE_LOCK
149124: lwarx r0,0,r3
1492 cmpwi r0,0
1493 bne 24b
1494 stwcx. r8,0,r3
1495 bne 24b
1496 isync
1497
1498 ld r7,KVM_HOST_LPCR(r4) /* use kvm->arch.host_lpcr for HID4 */
1499 li r0,0x18f
1500 rotldi r0,r0,HID4_LPID5_SH /* all lpid bits in HID4 = 1 */
1501 or r0,r7,r0
1502 ptesync
1503 sync
1504 mtspr SPRN_HID4,r0 /* switch to reserved LPID */
1505 isync
1506 li r0,0
1507 stw r0,0(r3) /* drop guest tlbie_lock */
1508
1509 /* invalidate the whole TLB */
1510 li r0,256
1511 mtctr r0
1512 li r6,0
151325: tlbiel r6
1514 addi r6,r6,0x1000
1515 bdnz 25b
1516 ptesync
1517
1518 /* take native_tlbie_lock */
1519 ld r3,toc_tlbie_lock@toc(2)
152024: lwarx r0,0,r3
1521 cmpwi r0,0
1522 bne 24b
1523 stwcx. r8,0,r3
1524 bne 24b
1525 isync
1526
1527 ld r6,KVM_HOST_SDR1(r4)
1528 mtspr SPRN_SDR1,r6 /* switch to host page table */
1529
1530 /* Set up host HID4 value */
1531 sync
1532 mtspr SPRN_HID4,r7
1533 isync
1534 li r0,0
1535 stw r0,0(r3) /* drop native_tlbie_lock */
1536
1537 lis r8,0x7fff /* MAX_INT@h */
1538 mtspr SPRN_HDEC,r8
1539
1540 /* Disable HDEC interrupts */
1541 mfspr r0,SPRN_HID0
1542 li r3,0
1543 rldimi r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
1544 sync
1545 mtspr SPRN_HID0,r0
1546 mfspr r0,SPRN_HID0
1547 mfspr r0,SPRN_HID0
1548 mfspr r0,SPRN_HID0
1549 mfspr r0,SPRN_HID0
1550 mfspr r0,SPRN_HID0
1551 mfspr r0,SPRN_HID0
1552
1553 /* load host SLB entries */
155433: ld r8,PACA_SLBSHADOWPTR(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00001555
Paul Mackerras218309b2013-09-06 13:23:44 +10001556 .rept SLB_NUM_BOLTED
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001557 ld r5,SLBSHADOW_SAVEAREA(r8)
1558 ld r6,SLBSHADOW_SAVEAREA+8(r8)
Paul Mackerras218309b2013-09-06 13:23:44 +10001559 andis. r7,r5,SLB_ESID_V@h
1560 beq 1f
1561 slbmte r6,r5
Paul Mackerrase0b7ec02014-01-08 21:25:20 +110015621: addi r8,r8,16
Paul Mackerras218309b2013-09-06 13:23:44 +10001563 .endr
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001564
1565 /* Unset guest mode */
1566 li r0, KVM_GUEST_MODE_NONE
1567 stb r0, HSTATE_IN_GUEST(r13)
1568
1569 ld r0, 112+PPC_LR_STKOFF(r1)
1570 addi r1, r1, 112
1571 mtlr r0
1572 blr
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001573
Paul Mackerras697d3892011-12-12 12:36:37 +00001574/*
1575 * Check whether an HDSI is an HPTE not found fault or something else.
1576 * If it is an HPTE not found fault that is due to the guest accessing
1577 * a page that they have mapped but which we have paged out, then
1578 * we continue on with the guest exit path. In all other cases,
1579 * reflect the HDSI to the guest as a DSI.
1580 */
1581kvmppc_hdsi:
1582 mfspr r4, SPRN_HDAR
1583 mfspr r6, SPRN_HDSISR
Paul Mackerras4cf302b2011-12-12 12:38:51 +00001584 /* HPTE not found fault or protection fault? */
1585 andis. r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
Paul Mackerras697d3892011-12-12 12:36:37 +00001586 beq 1f /* if not, send it to the guest */
1587 andi. r0, r11, MSR_DR /* data relocation enabled? */
1588 beq 3f
1589 clrrdi r0, r4, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00001590 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerras697d3892011-12-12 12:36:37 +00001591 bne 1f /* if no SLB entry found */
15924: std r4, VCPU_FAULT_DAR(r9)
1593 stw r6, VCPU_FAULT_DSISR(r9)
1594
1595 /* Search the hash table. */
1596 mr r3, r9 /* vcpu pointer */
Paul Mackerras342d3db2011-12-12 12:38:05 +00001597 li r7, 1 /* data fault */
Paul Mackerras697d3892011-12-12 12:36:37 +00001598 bl .kvmppc_hpte_hv_fault
1599 ld r9, HSTATE_KVM_VCPU(r13)
1600 ld r10, VCPU_PC(r9)
1601 ld r11, VCPU_MSR(r9)
1602 li r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
1603 cmpdi r3, 0 /* retry the instruction */
1604 beq 6f
1605 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001606 beq guest_exit_cont
Paul Mackerras697d3892011-12-12 12:36:37 +00001607 cmpdi r3, -2 /* MMIO emulation; need instr word */
1608 beq 2f
1609
1610 /* Synthesize a DSI for the guest */
1611 ld r4, VCPU_FAULT_DAR(r9)
1612 mr r6, r3
16131: mtspr SPRN_DAR, r4
1614 mtspr SPRN_DSISR, r6
1615 mtspr SPRN_SRR0, r10
1616 mtspr SPRN_SRR1, r11
1617 li r10, BOOK3S_INTERRUPT_DATA_STORAGE
1618 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
1619 rotldi r11, r11, 63
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001620fast_interrupt_c_return:
Paul Mackerras697d3892011-12-12 12:36:37 +000016216: ld r7, VCPU_CTR(r9)
1622 lwz r8, VCPU_XER(r9)
1623 mtctr r7
1624 mtxer r8
1625 mr r4, r9
1626 b fast_guest_return
1627
16283: ld r5, VCPU_KVM(r9) /* not relocated, use VRMA */
1629 ld r5, KVM_VRMA_SLB_V(r5)
1630 b 4b
1631
1632 /* If this is for emulated MMIO, load the instruction word */
16332: li r8, KVM_INST_FETCH_FAILED /* In case lwz faults */
1634
1635 /* Set guest mode to 'jump over instruction' so if lwz faults
1636 * we'll just continue at the next IP. */
1637 li r0, KVM_GUEST_MODE_SKIP
1638 stb r0, HSTATE_IN_GUEST(r13)
1639
1640 /* Do the access with MSR:DR enabled */
1641 mfmsr r3
1642 ori r4, r3, MSR_DR /* Enable paging for data */
1643 mtmsrd r4
1644 lwz r8, 0(r10)
1645 mtmsrd r3
1646
1647 /* Store the result */
1648 stw r8, VCPU_LAST_INST(r9)
1649
1650 /* Unset guest mode. */
Paul Mackerras44a3add2013-10-04 21:45:04 +10001651 li r0, KVM_GUEST_MODE_HOST_HV
Paul Mackerras697d3892011-12-12 12:36:37 +00001652 stb r0, HSTATE_IN_GUEST(r13)
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001653 b guest_exit_cont
Paul Mackerrasde56a942011-06-29 00:21:34 +00001654
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001655/*
Paul Mackerras342d3db2011-12-12 12:38:05 +00001656 * Similarly for an HISI, reflect it to the guest as an ISI unless
1657 * it is an HPTE not found fault for a page that we have paged out.
1658 */
1659kvmppc_hisi:
1660 andis. r0, r11, SRR1_ISI_NOPT@h
1661 beq 1f
1662 andi. r0, r11, MSR_IR /* instruction relocation enabled? */
1663 beq 3f
1664 clrrdi r0, r10, 28
Michael Neulingc75df6f2012-06-25 13:33:10 +00001665 PPC_SLBFEE_DOT(R5, R0) /* if so, look up SLB */
Paul Mackerras342d3db2011-12-12 12:38:05 +00001666 bne 1f /* if no SLB entry found */
16674:
1668 /* Search the hash table. */
1669 mr r3, r9 /* vcpu pointer */
1670 mr r4, r10
1671 mr r6, r11
1672 li r7, 0 /* instruction fault */
1673 bl .kvmppc_hpte_hv_fault
1674 ld r9, HSTATE_KVM_VCPU(r13)
1675 ld r10, VCPU_PC(r9)
1676 ld r11, VCPU_MSR(r9)
1677 li r12, BOOK3S_INTERRUPT_H_INST_STORAGE
1678 cmpdi r3, 0 /* retry the instruction */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001679 beq fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00001680 cmpdi r3, -1 /* handle in kernel mode */
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001681 beq guest_exit_cont
Paul Mackerras342d3db2011-12-12 12:38:05 +00001682
1683 /* Synthesize an ISI for the guest */
1684 mr r11, r3
16851: mtspr SPRN_SRR0, r10
1686 mtspr SPRN_SRR1, r11
1687 li r10, BOOK3S_INTERRUPT_INST_STORAGE
1688 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
1689 rotldi r11, r11, 63
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001690 b fast_interrupt_c_return
Paul Mackerras342d3db2011-12-12 12:38:05 +00001691
16923: ld r6, VCPU_KVM(r9) /* not relocated, use VRMA */
1693 ld r5, KVM_VRMA_SLB_V(r6)
1694 b 4b
1695
1696/*
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001697 * Try to handle an hcall in real mode.
1698 * Returns to the guest if we handle it, or continues on up to
1699 * the kernel if we can't (i.e. if we don't have a handler for
1700 * it, or if the handler returns H_TOO_HARD).
1701 */
1702 .globl hcall_try_real_mode
1703hcall_try_real_mode:
Michael Neulingc75df6f2012-06-25 13:33:10 +00001704 ld r3,VCPU_GPR(R3)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001705 andi. r0,r11,MSR_PR
Liu Ping Fan27025a62013-11-19 14:12:48 +08001706 /* sc 1 from userspace - reflect to guest syscall */
1707 bne sc_1_fast_return
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001708 clrrdi r3,r3,2
1709 cmpldi r3,hcall_real_table_end - hcall_real_table
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001710 bge guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001711 LOAD_REG_ADDR(r4, hcall_real_table)
Paul Mackerras4baa1d82013-07-08 20:09:53 +10001712 lwax r3,r3,r4
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001713 cmpwi r3,0
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001714 beq guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001715 add r3,r3,r4
1716 mtctr r3
1717 mr r3,r9 /* get vcpu pointer */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001718 ld r4,VCPU_GPR(R4)(r9)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001719 bctrl
1720 cmpdi r3,H_TOO_HARD
1721 beq hcall_real_fallback
1722 ld r4,HSTATE_KVM_VCPU(r13)
Michael Neulingc75df6f2012-06-25 13:33:10 +00001723 std r3,VCPU_GPR(R3)(r4)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001724 ld r10,VCPU_PC(r4)
1725 ld r11,VCPU_MSR(r4)
1726 b fast_guest_return
1727
Liu Ping Fan27025a62013-11-19 14:12:48 +08001728sc_1_fast_return:
1729 mtspr SPRN_SRR0,r10
1730 mtspr SPRN_SRR1,r11
1731 li r10, BOOK3S_INTERRUPT_SYSCALL
1732 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
1733 rotldi r11, r11, 63
1734 mr r4,r9
1735 b fast_guest_return
1736
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001737	/* We've attempted a real-mode hcall, but it has been punted back
1738	 * to userspace.  Restore the clobbered volatile registers before
1739	 * resuming the pass-it-to-qemu path. */
1740hcall_real_fallback:
1741 li r12,BOOK3S_INTERRUPT_SYSCALL
1742 ld r9, HSTATE_KVM_VCPU(r13)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001743
Paul Mackerrasb4072df2012-11-23 22:37:50 +00001744 b guest_exit_cont
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001745
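/*
 * Each entry below is a 32-bit offset from hcall_real_table to the
 * real-mode handler for one hcall.  Hcall numbers are multiples of 4
 * and each entry is a .long, so an hcall's entry sits at a byte offset
 * equal to its number; a zero entry means there is no real-mode handler
 * and the hcall is passed up to the host kernel via guest_exit_cont.
 */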
1746 .globl hcall_real_table
1747hcall_real_table:
1748 .long 0 /* 0 - unused */
1749 .long .kvmppc_h_remove - hcall_real_table
1750 .long .kvmppc_h_enter - hcall_real_table
1751 .long .kvmppc_h_read - hcall_real_table
1752 .long 0 /* 0x10 - H_CLEAR_MOD */
1753 .long 0 /* 0x14 - H_CLEAR_REF */
1754 .long .kvmppc_h_protect - hcall_real_table
1755 .long 0 /* 0x1c - H_GET_TCE */
David Gibson54738c02011-06-29 00:22:41 +00001756 .long .kvmppc_h_put_tce - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001757 .long 0 /* 0x24 - H_SET_SPRG0 */
1758 .long .kvmppc_h_set_dabr - hcall_real_table
1759 .long 0 /* 0x2c */
1760 .long 0 /* 0x30 */
1761 .long 0 /* 0x34 */
1762 .long 0 /* 0x38 */
1763 .long 0 /* 0x3c */
1764 .long 0 /* 0x40 */
1765 .long 0 /* 0x44 */
1766 .long 0 /* 0x48 */
1767 .long 0 /* 0x4c */
1768 .long 0 /* 0x50 */
1769 .long 0 /* 0x54 */
1770 .long 0 /* 0x58 */
1771 .long 0 /* 0x5c */
1772 .long 0 /* 0x60 */
Benjamin Herrenschmidte7d26f22013-04-17 20:31:15 +00001773#ifdef CONFIG_KVM_XICS
1774 .long .kvmppc_rm_h_eoi - hcall_real_table
1775 .long .kvmppc_rm_h_cppr - hcall_real_table
1776 .long .kvmppc_rm_h_ipi - hcall_real_table
1777 .long 0 /* 0x70 - H_IPOLL */
1778 .long .kvmppc_rm_h_xirr - hcall_real_table
1779#else
1780 .long 0 /* 0x64 - H_EOI */
1781 .long 0 /* 0x68 - H_CPPR */
1782 .long 0 /* 0x6c - H_IPI */
1783 .long 0 /* 0x70 - H_IPOLL */
1784 .long 0 /* 0x74 - H_XIRR */
1785#endif
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001786 .long 0 /* 0x78 */
1787 .long 0 /* 0x7c */
1788 .long 0 /* 0x80 */
1789 .long 0 /* 0x84 */
1790 .long 0 /* 0x88 */
1791 .long 0 /* 0x8c */
1792 .long 0 /* 0x90 */
1793 .long 0 /* 0x94 */
1794 .long 0 /* 0x98 */
1795 .long 0 /* 0x9c */
1796 .long 0 /* 0xa0 */
1797 .long 0 /* 0xa4 */
1798 .long 0 /* 0xa8 */
1799 .long 0 /* 0xac */
1800 .long 0 /* 0xb0 */
1801 .long 0 /* 0xb4 */
1802 .long 0 /* 0xb8 */
1803 .long 0 /* 0xbc */
1804 .long 0 /* 0xc0 */
1805 .long 0 /* 0xc4 */
1806 .long 0 /* 0xc8 */
1807 .long 0 /* 0xcc */
1808 .long 0 /* 0xd0 */
1809 .long 0 /* 0xd4 */
1810 .long 0 /* 0xd8 */
1811 .long 0 /* 0xdc */
Paul Mackerras19ccb762011-07-23 17:42:46 +10001812 .long .kvmppc_h_cede - hcall_real_table
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001813 .long 0 /* 0xe4 */
1814 .long 0 /* 0xe8 */
1815 .long 0 /* 0xec */
1816 .long 0 /* 0xf0 */
1817 .long 0 /* 0xf4 */
1818 .long 0 /* 0xf8 */
1819 .long 0 /* 0xfc */
1820 .long 0 /* 0x100 */
1821 .long 0 /* 0x104 */
1822 .long 0 /* 0x108 */
1823 .long 0 /* 0x10c */
1824 .long 0 /* 0x110 */
1825 .long 0 /* 0x114 */
1826 .long 0 /* 0x118 */
1827 .long 0 /* 0x11c */
1828 .long 0 /* 0x120 */
1829 .long .kvmppc_h_bulk_remove - hcall_real_table
1830hcall_real_table_end:
1831
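/*
 * A leftover hypervisor decrementer interrupt (one whose HDEC has
 * already gone non-negative again by the time we look at it) lands
 * here; there is nothing to do except re-enter the guest.
 */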
Paul Mackerrasde56a942011-06-29 00:21:34 +00001832ignore_hdec:
1833 mr r4,r9
1834 b fast_guest_return
1835
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001836_GLOBAL(kvmppc_h_set_dabr)
Michael Neulingeee7ff92014-01-08 21:25:19 +11001837BEGIN_FTR_SECTION
1838 b 2f
1839END_FTR_SECTION_IFSET(CPU_FTR_ARCH_207S)
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001840 std r4,VCPU_DABR(r3)
Paul Mackerras89436332012-03-02 01:38:23 +00001841 /* Work around P7 bug where DABR can get corrupted on mtspr */
18421: mtspr SPRN_DABR,r4
1843 mfspr r5, SPRN_DABR
1844 cmpd r4, r5
1845 bne 1b
1846 isync
Michael Neulingeee7ff92014-01-08 21:25:19 +110018472: li r3,0
Paul Mackerrasa8606e22011-06-29 00:22:05 +00001848 blr
1849
Paul Mackerras19ccb762011-07-23 17:42:46 +10001850_GLOBAL(kvmppc_h_cede)
1851 ori r11,r11,MSR_EE
1852 std r11,VCPU_MSR(r3)
1853 li r0,1
1854 stb r0,VCPU_CEDED(r3)
1855 sync /* order setting ceded vs. testing prodded */
1856 lbz r5,VCPU_PRODDED(r3)
1857 cmpwi r5,0
Paul Mackerras04f995a2012-08-06 00:03:28 +00001858 bne kvm_cede_prodded
Paul Mackerras19ccb762011-07-23 17:42:46 +10001859 li r0,0 /* set trap to 0 to say hcall is handled */
1860 stw r0,VCPU_TRAP(r3)
1861 li r0,H_SUCCESS
Michael Neulingc75df6f2012-06-25 13:33:10 +00001862 std r0,VCPU_GPR(R3)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10001863BEGIN_FTR_SECTION
Paul Mackerras04f995a2012-08-06 00:03:28 +00001864 b kvm_cede_exit /* just send it up to host on 970 */
Paul Mackerras19ccb762011-07-23 17:42:46 +10001865END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)
1866
1867 /*
1868 * Set our bit in the bitmask of napping threads unless all the
1869 * other threads are already napping, in which case we send this
1870 * up to the host.
1871 */
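	/*
	 * The lwarx/stwcx. loop below atomically sets our bit in
	 * vcore->napping_threads.  If the popcount of the updated mask
	 * reaches the number of threads that have entered the guest (the
	 * low byte of entry_exit_count), every thread would be napping,
	 * so we branch to kvm_cede_exit and let the host handle the cede.
	 */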
1872 ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001873 lbz r6,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10001874 lwz r8,VCORE_ENTRY_EXIT(r5)
1875 clrldi r8,r8,56
1876 li r0,1
1877 sld r0,r0,r6
1878 addi r6,r5,VCORE_NAPPING_THREADS
187931: lwarx r4,0,r6
1880 or r4,r4,r0
Michael Neulingc75df6f2012-06-25 13:33:10 +00001881 PPC_POPCNTW(R7,R4)
Paul Mackerras19ccb762011-07-23 17:42:46 +10001882 cmpw r7,r8
Paul Mackerras04f995a2012-08-06 00:03:28 +00001883 bge kvm_cede_exit
Paul Mackerras19ccb762011-07-23 17:42:46 +10001884 stwcx. r4,0,r6
1885 bne 31b
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001886 li r0,NAPPING_CEDE
Paul Mackerras19ccb762011-07-23 17:42:46 +10001887 stb r0,HSTATE_NAPPING(r13)
1888 /* order napping_threads update vs testing entry_exit_count */
1889 lwsync
1890 mr r4,r3
1891 lwz r7,VCORE_ENTRY_EXIT(r5)
1892 cmpwi r7,0x100
1893 bge 33f /* another thread already exiting */
1894
1895/*
1896 * Although not specifically required by the architecture, POWER7
1897 * preserves the following registers in nap mode, even if an SMT mode
1898 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
1899 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
1900 */
1901 /* Save non-volatile GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001902 std r14, VCPU_GPR(R14)(r3)
1903 std r15, VCPU_GPR(R15)(r3)
1904 std r16, VCPU_GPR(R16)(r3)
1905 std r17, VCPU_GPR(R17)(r3)
1906 std r18, VCPU_GPR(R18)(r3)
1907 std r19, VCPU_GPR(R19)(r3)
1908 std r20, VCPU_GPR(R20)(r3)
1909 std r21, VCPU_GPR(R21)(r3)
1910 std r22, VCPU_GPR(R22)(r3)
1911 std r23, VCPU_GPR(R23)(r3)
1912 std r24, VCPU_GPR(R24)(r3)
1913 std r25, VCPU_GPR(R25)(r3)
1914 std r26, VCPU_GPR(R26)(r3)
1915 std r27, VCPU_GPR(R27)(r3)
1916 std r28, VCPU_GPR(R28)(r3)
1917 std r29, VCPU_GPR(R29)(r3)
1918 std r30, VCPU_GPR(R30)(r3)
1919 std r31, VCPU_GPR(R31)(r3)
Paul Mackerras19ccb762011-07-23 17:42:46 +10001920
1921 /* save FP state */
Paul Mackerras595e4f72013-10-15 20:43:04 +11001922 bl kvmppc_save_fp
Paul Mackerras19ccb762011-07-23 17:42:46 +10001923
1924 /*
1925 * Take a nap until a decrementer or external interrupt occurs,
1926 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
1927 */
Paul Mackerrasf0888f72012-02-03 00:54:17 +00001928 li r0,1
1929 stb r0,HSTATE_HWTHREAD_REQ(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10001930 mfspr r5,SPRN_LPCR
1931 ori r5,r5,LPCR_PECE0 | LPCR_PECE1
1932 mtspr SPRN_LPCR,r5
1933 isync
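	/*
	 * The store/ptesync/reload and the always-true compare-and-branch
	 * loop below appear to be the architected idle-entry sequence,
	 * ensuring all prior stores have been performed before this
	 * thread executes the nap instruction.
	 */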
1934 li r0, 0
1935 std r0, HSTATE_SCRATCH0(r13)
1936 ptesync
1937 ld r0, HSTATE_SCRATCH0(r13)
19381: cmpd r0, r0
1939 bne 1b
1940 nap
1941 b .
1942
1943kvm_end_cede:
Paul Mackerras4619ac82013-04-17 20:31:41 +00001944 /* get vcpu pointer */
1945 ld r4, HSTATE_KVM_VCPU(r13)
1946
Paul Mackerras19ccb762011-07-23 17:42:46 +10001947 /* Woken by external or decrementer interrupt */
1948 ld r1, HSTATE_HOST_R1(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10001949
Paul Mackerras19ccb762011-07-23 17:42:46 +10001950 /* load up FP state */
1951 bl kvmppc_load_fp
1952
1953	/* Load NV GPRs */
Michael Neulingc75df6f2012-06-25 13:33:10 +00001954 ld r14, VCPU_GPR(R14)(r4)
1955 ld r15, VCPU_GPR(R15)(r4)
1956 ld r16, VCPU_GPR(R16)(r4)
1957 ld r17, VCPU_GPR(R17)(r4)
1958 ld r18, VCPU_GPR(R18)(r4)
1959 ld r19, VCPU_GPR(R19)(r4)
1960 ld r20, VCPU_GPR(R20)(r4)
1961 ld r21, VCPU_GPR(R21)(r4)
1962 ld r22, VCPU_GPR(R22)(r4)
1963 ld r23, VCPU_GPR(R23)(r4)
1964 ld r24, VCPU_GPR(R24)(r4)
1965 ld r25, VCPU_GPR(R25)(r4)
1966 ld r26, VCPU_GPR(R26)(r4)
1967 ld r27, VCPU_GPR(R27)(r4)
1968 ld r28, VCPU_GPR(R28)(r4)
1969 ld r29, VCPU_GPR(R29)(r4)
1970 ld r30, VCPU_GPR(R30)(r4)
1971 ld r31, VCPU_GPR(R31)(r4)
Paul Mackerras19ccb762011-07-23 17:42:46 +10001972
1973 /* clear our bit in vcore->napping_threads */
197433: ld r5,HSTATE_KVM_VCORE(r13)
Paul Mackerrase0b7ec02014-01-08 21:25:20 +11001975 lbz r3,HSTATE_PTID(r13)
Paul Mackerras19ccb762011-07-23 17:42:46 +10001976 li r0,1
1977 sld r0,r0,r3
1978 addi r6,r5,VCORE_NAPPING_THREADS
197932: lwarx r7,0,r6
1980 andc r7,r7,r0
1981 stwcx. r7,0,r6
1982 bne 32b
1983 li r0,0
1984 stb r0,HSTATE_NAPPING(r13)
1985
Paul Mackerras4619ac82013-04-17 20:31:41 +00001986 /* Check the wake reason in SRR1 to see why we got here */
1987 mfspr r3, SPRN_SRR1
1988 rlwinm r3, r3, 44-31, 0x7 /* extract wake reason field */
1989 cmpwi r3, 4 /* was it an external interrupt? */
1990 li r12, BOOK3S_INTERRUPT_EXTERNAL
1991 mr r9, r4
1992 ld r10, VCPU_PC(r9)
1993 ld r11, VCPU_MSR(r9)
1994 beq do_ext_interrupt /* if so */
1995
Paul Mackerras19ccb762011-07-23 17:42:46 +10001996 /* see if any other thread is already exiting */
1997 lwz r0,VCORE_ENTRY_EXIT(r5)
1998 cmpwi r0,0x100
1999 blt kvmppc_cede_reentry /* if not go back to guest */
2000
2001 /* some threads are exiting, so go to the guest exit path */
2002 b hcall_real_fallback
2003
2004	/* handle cede when the vcpu has already been prodded */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002005kvm_cede_prodded:
2006 li r0,0
Paul Mackerras19ccb762011-07-23 17:42:46 +10002007 stb r0,VCPU_PRODDED(r3)
2008 sync /* order testing prodded vs. clearing ceded */
2009 stb r0,VCPU_CEDED(r3)
2010 li r3,H_SUCCESS
2011 blr
2012
2013 /* we've ceded but we want to give control to the host */
Paul Mackerras04f995a2012-08-06 00:03:28 +00002014kvm_cede_exit:
Paul Mackerras4619ac82013-04-17 20:31:41 +00002015 b hcall_real_fallback
Paul Mackerras19ccb762011-07-23 17:42:46 +10002016
Paul Mackerrasb4072df2012-11-23 22:37:50 +00002017 /* Try to handle a machine check in real mode */
2018machine_check_realmode:
2019 mr r3, r9 /* get vcpu pointer */
2020 bl .kvmppc_realmode_machine_check
2021 nop
2022 cmpdi r3, 0 /* continue exiting from guest? */
2023 ld r9, HSTATE_KVM_VCPU(r13)
2024 li r12, BOOK3S_INTERRUPT_MACHINE_CHECK
2025 beq mc_cont
2026 /* If not, deliver a machine check. SRR0/1 are already set */
2027 li r10, BOOK3S_INTERRUPT_MACHINE_CHECK
2028 li r11, (MSR_ME << 1) | 1 /* synthesize MSR_SF | MSR_ME */
2029 rotldi r11, r11, 63
2030 b fast_interrupt_c_return
2031
Paul Mackerrasde56a942011-06-29 00:21:34 +00002032/*
Paul Mackerrasc9342432013-09-06 13:24:13 +10002033 * Determine what sort of external interrupt is pending (if any).
2034 * Returns:
2035 * 0 if no interrupt is pending
2036 * 1 if an interrupt is pending that needs to be handled by the host
2037 * -1 if there was a guest wakeup IPI (which has now been cleared)
2038 */
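/*
 * The ICP (XICS interrupt presentation controller) is accessed below
 * through its real-mode address from HSTATE_XICS_PHYS: a load from the
 * XIRR register accepts the highest-priority pending interrupt (source
 * number in the low 24 bits), storing the same value back EOIs it, and
 * storing 0xff to the MFRR clears our inter-processor interrupt.
 */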
2039kvmppc_read_intr:
2040 /* see if a host IPI is pending */
2041 li r3, 1
2042 lbz r0, HSTATE_HOST_IPI(r13)
2043 cmpwi r0, 0
2044 bne 1f
Paul Mackerrasde56a942011-06-29 00:21:34 +00002045
Paul Mackerrasc9342432013-09-06 13:24:13 +10002046 /* Now read the interrupt from the ICP */
2047 ld r6, HSTATE_XICS_PHYS(r13)
Paul Mackerrasde56a942011-06-29 00:21:34 +00002048 li r7, XICS_XIRR
Paul Mackerrasc9342432013-09-06 13:24:13 +10002049 cmpdi r6, 0
2050 beq- 1f
2051 lwzcix r0, r6, r7
2052 rlwinm. r3, r0, 0, 0xffffff
Paul Mackerrasde56a942011-06-29 00:21:34 +00002053 sync
Paul Mackerrasc9342432013-09-06 13:24:13 +10002054 beq 1f /* if nothing pending in the ICP */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002055
Paul Mackerrasc9342432013-09-06 13:24:13 +10002056 /* We found something in the ICP...
2057 *
2058 * If it's not an IPI, stash it in the PACA and return to
2059	 * the host; we don't (yet) handle directing real external
2060	 * interrupts to the guest.
2061 */
2062 cmpwi r3, XICS_IPI /* if there is, is it an IPI? */
2063 li r3, 1
2064 bne 42f
Paul Mackerrasde56a942011-06-29 00:21:34 +00002065
Paul Mackerrasc9342432013-09-06 13:24:13 +10002066 /* It's an IPI, clear the MFRR and EOI it */
2067 li r3, 0xff
2068 li r8, XICS_MFRR
2069 stbcix r3, r6, r8 /* clear the IPI */
2070 stwcix r0, r6, r7 /* EOI it */
2071 sync
Paul Mackerrasde56a942011-06-29 00:21:34 +00002072
Paul Mackerrasc9342432013-09-06 13:24:13 +10002073 /* We need to re-check host IPI now in case it got set in the
2074 * meantime. If it's clear, we bounce the interrupt to the
2075 * guest
2076 */
2077 lbz r0, HSTATE_HOST_IPI(r13)
2078 cmpwi r0, 0
2079 bne- 43f
2080
2081 /* OK, it's an IPI for us */
2082 li r3, -1
20831: blr
2084
208542:	/* It's not an IPI and it's for the host: stash it in the PACA
2086	 * before we exit; it will be picked up by the host ICP driver.
2087 */
2088 stw r0, HSTATE_SAVED_XIRR(r13)
2089 b 1b
2090
209143:	/* We raced with the host; we need to resend that IPI, bummer */
2092 li r0, IPI_PRIORITY
2093 stbcix r0, r6, r8 /* set the IPI */
2094 sync
2095 b 1b
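	/* (Writing IPI_PRIORITY back to our MFRR re-raises the IPI we just
	 * EOIed, so the host will see it when it regains control; r3 is
	 * still 1, i.e. "host interrupt pending".) */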
Paul Mackerrasde56a942011-06-29 00:21:34 +00002096
2097/*
2098 * Save away FP, VMX and VSX registers.
2099 * r3 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002100 * N.B. r30 and r31 are volatile across this function,
2101 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002102 */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002103kvmppc_save_fp:
2104 mflr r30
2105 mr r31,r3
Paul Mackerrasde56a942011-06-29 00:21:34 +00002106 mfmsr r5
2107 ori r8,r5,MSR_FP
2108#ifdef CONFIG_ALTIVEC
2109BEGIN_FTR_SECTION
2110 oris r8,r8,MSR_VEC@h
2111END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2112#endif
2113#ifdef CONFIG_VSX
2114BEGIN_FTR_SECTION
2115 oris r8,r8,MSR_VSX@h
2116END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2117#endif
2118 mtmsrd r8
2119 isync
Paul Mackerras595e4f72013-10-15 20:43:04 +11002120 addi r3,r3,VCPU_FPRS
2121 bl .store_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002122#ifdef CONFIG_ALTIVEC
2123BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002124 addi r3,r31,VCPU_VRS
2125 bl .store_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002126END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2127#endif
2128 mfspr r6,SPRN_VRSAVE
2129	stw	r6,VCPU_VRSAVE(r31)	/* r3 was clobbered above; r31 still holds the vcpu */
Paul Mackerras595e4f72013-10-15 20:43:04 +11002130 mtlr r30
Paul Mackerras89436332012-03-02 01:38:23 +00002131 mtmsrd r5
Paul Mackerrasde56a942011-06-29 00:21:34 +00002132 isync
2133 blr
2134
2135/*
2136 * Load up FP, VMX and VSX registers
2137 * r4 = vcpu pointer
Paul Mackerras595e4f72013-10-15 20:43:04 +11002138 * N.B. r30 and r31 are volatile across this function,
2139 * thus it is not callable from C.
Paul Mackerrasde56a942011-06-29 00:21:34 +00002140 */
Paul Mackerrasde56a942011-06-29 00:21:34 +00002141kvmppc_load_fp:
Paul Mackerras595e4f72013-10-15 20:43:04 +11002142 mflr r30
2143 mr r31,r4
Paul Mackerrasde56a942011-06-29 00:21:34 +00002144 mfmsr r9
2145 ori r8,r9,MSR_FP
2146#ifdef CONFIG_ALTIVEC
2147BEGIN_FTR_SECTION
2148 oris r8,r8,MSR_VEC@h
2149END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2150#endif
2151#ifdef CONFIG_VSX
2152BEGIN_FTR_SECTION
2153 oris r8,r8,MSR_VSX@h
2154END_FTR_SECTION_IFSET(CPU_FTR_VSX)
2155#endif
2156 mtmsrd r8
2157 isync
Paul Mackerras595e4f72013-10-15 20:43:04 +11002158 addi r3,r4,VCPU_FPRS
2159 bl .load_fp_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002160#ifdef CONFIG_ALTIVEC
2161BEGIN_FTR_SECTION
Paul Mackerras595e4f72013-10-15 20:43:04 +11002162 addi r3,r31,VCPU_VRS
2163 bl .load_vr_state
Paul Mackerrasde56a942011-06-29 00:21:34 +00002164END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
2165#endif
2166	lwz	r7,VCPU_VRSAVE(r31)	/* use r31: r4 may be clobbered by the helpers called above */
2167 mtspr SPRN_VRSAVE,r7
Paul Mackerras595e4f72013-10-15 20:43:04 +11002168 mtlr r30
2169 mr r4,r31
Paul Mackerrasde56a942011-06-29 00:21:34 +00002170 blr
Paul Mackerras44a3add2013-10-04 21:45:04 +10002171
2172/*
2173 * We come here if we get any exception or interrupt while we are
2174	 * executing host real-mode code with the guest MMU context still loaded.
2175 * For now just spin, but we should do something better.
2176 */
2177kvmppc_bad_host_intr:
2178 b .