/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 * Derived from book3s_rmhandlers.S and other files, which are:
 *
 * Copyright SUSE Linux Products GmbH 2009
 *
 * Authors: Alexander Graf <agraf@suse.de>
 */

#include <asm/ppc_asm.h>
#include <asm/kvm_asm.h>
#include <asm/reg.h>
#include <asm/mmu.h>
#include <asm/page.h>
#include <asm/ptrace.h>
#include <asm/hvcall.h>
#include <asm/asm-offsets.h>
#include <asm/exception-64s.h>
#include <asm/kvm_book3s_asm.h>
#include <asm/mmu-hash64.h>

/*****************************************************************************
 *                                                                           *
 *        Real Mode handlers that need to be in the linear mapping           *
 *                                                                           *
 ****************************************************************************/

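/*
 * These two stubs "skip" the instruction that trapped: [H]SRR0 holds
 * the address of the faulting instruction, so advancing it by 4 and
 * returning with [h]rfid resumes the interrupted context at the next
 * instruction.  r13 is recovered from SPRN_SCRATCH0 on the way out.
 */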
	.globl	kvmppc_skip_interrupt
kvmppc_skip_interrupt:
	mfspr	r13,SPRN_SRR0
	addi	r13,r13,4
	mtspr	SPRN_SRR0,r13
	GET_SCRATCH0(r13)
	rfid
	b	.

	.globl	kvmppc_skip_Hinterrupt
kvmppc_skip_Hinterrupt:
	mfspr	r13,SPRN_HSRR0
	addi	r13,r13,4
	mtspr	SPRN_HSRR0,r13
	GET_SCRATCH0(r13)
	hrfid
	b	.

/*
 * Call kvmppc_hv_entry in real mode.
 * Must be called with interrupts hard-disabled.
 *
 * Input Registers:
 *
 * LR = return address to continue at after eventually re-enabling MMU
 */
_GLOBAL(kvmppc_hv_entry_trampoline)
	mfmsr	r10
	LOAD_REG_ADDR(r5, kvmppc_hv_entry)
	li	r0,MSR_RI
	andc	r0,r10,r0
	li	r6,MSR_IR | MSR_DR
	andc	r6,r10,r6
	mtmsrd	r0,1		/* clear RI in MSR */
	mtsrr0	r5
	mtsrr1	r6
	RFI

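/*
 * In effect the trampoline above does (illustrative sketch):
 *	SRR0 = &kvmppc_hv_entry;
 *	SRR1 = MSR & ~(MSR_IR | MSR_DR);	// relocation off
 *	MSR &= ~MSR_RI;				// no recoverable window
 *	rfid;					// real-mode jump
 * LR is preserved, so kvmppc_hv_entry can eventually return to the
 * caller once the MMU is re-enabled.
 */
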
/******************************************************************************
 *                                                                            *
 *                               Entry code                                   *
 *                                                                            *
 *****************************************************************************/

/*
 * We come in here when wakened from nap mode on a secondary hw thread.
 * Relocation is off and most register values are lost.
 * r13 points to the PACA.
 */
	.globl	kvm_start_guest
kvm_start_guest:
	ld	r1,PACAEMERGSP(r13)
	subi	r1,r1,STACK_FRAME_OVERHEAD
	ld	r2,PACATOC(r13)

	li	r0,KVM_HWTHREAD_IN_KVM
	stb	r0,HSTATE_HWTHREAD_STATE(r13)

	/* NV GPR values from power7_idle() will no longer be valid */
	li	r0,1
	stb	r0,PACA_NAPSTATELOST(r13)

	/* get vcpu pointer, NULL if we have no vcpu to run */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	cr1,r4,0

	/* Check the wake reason in SRR1 to see why we got here */
	mfspr	r3,SPRN_SRR1
	rlwinm	r3,r3,44-31,0x7		/* extract wake reason field */
	cmpwi	r3,4			/* was it an external interrupt? */
	bne	27f

	/*
	 * External interrupt - for now assume it is an IPI, since we
	 * should never get any other interrupts sent to offline threads.
	 * Only do this for secondary threads.
	 */
	beq	cr1,25f
	lwz	r3,VCPU_PTID(r4)
	cmpwi	r3,0
	beq	27f
25:	ld	r5,HSTATE_XICS_PHYS(r13)
	li	r0,0xff
	li	r6,XICS_MFRR
	li	r7,XICS_XIRR
	lwzcix	r8,r5,r7		/* get and ack the interrupt */
	sync
	clrldi.	r9,r8,40		/* get interrupt source ID. */
	beq	27f			/* none there? */
	cmpwi	r9,XICS_IPI
	bne	26f
	stbcix	r0,r5,r6		/* clear IPI */
26:	stwcix	r8,r5,r7		/* EOI the interrupt */
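	/*
	 * XICS note: a load from XIRR both returns and acknowledges the
	 * highest-priority pending interrupt, a store of 0xff to MFRR
	 * clears a pending IPI (0xff = least favored priority), and
	 * storing the XIRR value back performs the EOI.
	 */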

27:	/* XXX should handle hypervisor maintenance interrupts etc. here */

	/* reload vcpu pointer after clearing the IPI */
	ld	r4,HSTATE_KVM_VCPU(r13)
	cmpdi	r4,0
	/* if we have no vcpu to run, go back to sleep */
	beq	kvm_no_guest

	/* were we napping due to cede? */
	lbz	r0,HSTATE_NAPPING(r13)
	cmpwi	r0,0
	bne	kvm_end_cede

.global kvmppc_hv_entry
kvmppc_hv_entry:

	/* Required state:
	 *
	 * R4 = vcpu pointer
	 * MSR = ~IR|DR
	 * R13 = PACA
	 * R1 = host R1
	 * all other volatile GPRS = free
	 */
	mflr	r0
	std	r0, HSTATE_VMHANDLER(r13)

	/* Set partition DABR */
	/* Do this before re-enabling PMU to avoid P7 DABR corruption bug */
	li	r5,3
	ld	r6,VCPU_DABR(r4)
	mtspr	SPRN_DABRX,r5
	mtspr	SPRN_DABR,r6
BEGIN_FTR_SECTION
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Load guest PMU registers */
	/* R4 is live here (vcpu pointer) */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	isync
	lwz	r3, VCPU_PMC(r4)	/* always load up guest PMU registers */
	lwz	r5, VCPU_PMC + 4(r4)	/* to prevent information leak */
	lwz	r6, VCPU_PMC + 8(r4)
	lwz	r7, VCPU_PMC + 12(r4)
	lwz	r8, VCPU_PMC + 16(r4)
	lwz	r9, VCPU_PMC + 20(r4)
BEGIN_FTR_SECTION
	lwz	r10, VCPU_PMC + 24(r4)
	lwz	r11, VCPU_PMC + 28(r4)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r5
	mtspr	SPRN_PMC3, r6
	mtspr	SPRN_PMC4, r7
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, VCPU_MMCR(r4)
	ld	r5, VCPU_MMCR + 8(r4)
	ld	r6, VCPU_MMCR + 16(r4)
	mtspr	SPRN_MMCR1, r5
	mtspr	SPRN_MMCRA, r6
	mtspr	SPRN_MMCR0, r3
	isync

	/* Load up FP, VMX and VSX registers */
	bl	kvmppc_load_fp

	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

BEGIN_FTR_SECTION
	/* Switch DSCR to guest value */
	ld	r5, VCPU_DSCR(r4)
	mtspr	SPRN_DSCR, r5
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/*
	 * Set the decrementer to the guest decrementer.
	 */
	ld	r8,VCPU_DEC_EXPIRES(r4)
	mftb	r7
	subf	r3,r7,r8
	mtspr	SPRN_DEC,r3
	stw	r3,VCPU_DEC(r4)
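	/*
	 * i.e. (illustrative C) DEC = vcpu->arch.dec_expires - mftb(),
	 * so the guest sees a decrementer that continues counting down
	 * from where it stood when this vcpu last ran.
	 */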

	ld	r5, VCPU_SPRG0(r4)
	ld	r6, VCPU_SPRG1(r4)
	ld	r7, VCPU_SPRG2(r4)
	ld	r8, VCPU_SPRG3(r4)
	mtspr	SPRN_SPRG0, r5
	mtspr	SPRN_SPRG1, r6
	mtspr	SPRN_SPRG2, r7
	mtspr	SPRN_SPRG3, r8

	/* Save R1 in the PACA */
	std	r1, HSTATE_HOST_R1(r13)

	/* Increment yield count if they have a VPA */
	ld	r3, VCPU_VPA(r4)
	cmpdi	r3, 0
	beq	25f
	lwz	r5, LPPACA_YIELDCOUNT(r3)
	addi	r5, r5, 1
	stw	r5, LPPACA_YIELDCOUNT(r3)
	li	r6, 1
	stb	r6, VCPU_VPA_DIRTY(r4)
25:
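	/*
	 * (The lppaca yield count lets the guest detect that it was
	 * preempted and redispatched; setting VCPU_VPA_DIRTY makes sure
	 * the page holding the VPA gets marked dirty, e.g. for
	 * migration.)
	 */
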
	/* Load up DAR and DSISR */
	ld	r5, VCPU_DAR(r4)
	lwz	r6, VCPU_DSISR(r4)
	mtspr	SPRN_DAR, r5
	mtspr	SPRN_DSISR, r6

BEGIN_FTR_SECTION
	/* Restore AMR and UAMOR, set AMOR to all 1s */
	ld	r5,VCPU_AMR(r4)
	ld	r6,VCPU_UAMOR(r4)
	li	r7,-1
	mtspr	SPRN_AMR,r5
	mtspr	SPRN_UAMOR,r6
	mtspr	SPRN_AMOR,r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Clear out SLB */
	li	r6,0
	slbmte	r6,r6
	slbia
	ptesync

BEGIN_FTR_SECTION
	b	30f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 host -> guest partition switch code.
	 * We don't have to lock against concurrent tlbies,
	 * but we do have to coordinate across hardware threads.
	 */
	/* Increment entry count iff exit count is zero. */
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r9,r5,VCORE_ENTRY_EXIT
21:	lwarx	r3,0,r9
	cmpwi	r3,0x100		/* any threads starting to exit? */
	bge	secondary_too_late	/* if so we're too late to the party */
	addi	r3,r3,1
	stwcx.	r3,0,r9
	bne	21b
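	/*
	 * Illustrative C for the lwarx/stwcx. loop above:
	 *	do {
	 *		old = vcore->entry_exit_count;
	 *		if (old >= 0x100)	// exit count (bits 8+) set
	 *			goto secondary_too_late;
	 *	} while (cmpxchg(&vcore->entry_exit_count,
	 *			 old, old + 1) != old);
	 */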

	/* Primary thread switches to guest partition. */
	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */
	lwz	r6,VCPU_PTID(r4)
	cmpwi	r6,0
	bne	20f
	ld	r6,KVM_SDR1(r9)
	lwz	r7,KVM_LPID(r9)
	li	r0,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r0
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync

	/* See if we need to flush the TLB */
	lhz	r6,PACAPACAINDEX(r13)	/* test_bit(cpu, need_tlb_flush) */
	clrldi	r7,r6,64-6		/* extract bit number (6 bits) */
	srdi	r6,r6,6			/* doubleword number */
	sldi	r6,r6,3			/* address offset */
	add	r6,r6,r9
	addi	r6,r6,KVM_NEED_FLUSH	/* dword in kvm->arch.need_tlb_flush */
	li	r0,1
	sld	r0,r0,r7
	ld	r7,0(r6)
	and.	r7,r7,r0
	beq	22f
23:	ldarx	r7,0,r6			/* if set, clear the bit */
	andc	r7,r7,r0
	stdcx.	r7,0,r6
	bne	23b
	li	r6,128			/* and flush the TLB */
	mtctr	r6
	li	r7,0x800		/* IS field = 0b10 */
	ptesync
28:	tlbiel	r7
	addi	r7,r7,0x1000
	bdnz	28b
	ptesync
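	/*
	 * (The sequence above is effectively test_and_clear_bit(cpu,
	 * kvm->arch.need_tlb_flush) followed by a local flush: the 128
	 * tlbiel iterations step r7 through the TLB index values,
	 * invalidating the whole TLB on this core.)
	 */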

22:	li	r0,1
	stb	r0,VCORE_IN_GUEST(r5)	/* signal secondaries to continue */
	b	10f

	/* Secondary threads wait for primary to have done partition switch */
20:	lbz	r0,VCORE_IN_GUEST(r5)
	cmpwi	r0,0
	beq	20b

	/* Set LPCR and RMOR. */
10:	ld	r8,KVM_LPCR(r9)
	mtspr	SPRN_LPCR,r8
	ld	r8,KVM_RMOR(r9)
	mtspr	SPRN_RMOR,r8
	isync

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Save purr/spurr */
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	std	r5,HSTATE_PURR(r13)
	std	r6,HSTATE_SPURR(r13)
	ld	r7,VCPU_PURR(r4)
	ld	r8,VCPU_SPURR(r4)
	mtspr	SPRN_PURR,r7
	mtspr	SPRN_SPURR,r8
	b	31f

	/*
	 * PPC970 host -> guest partition switch code.
	 * We have to lock against concurrent tlbies,
	 * using native_tlbie_lock to lock against host tlbies
	 * and kvm->arch.tlbie_lock to lock against guest tlbies.
	 * We also have to invalidate the TLB since its
	 * entries aren't tagged with the LPID.
	 */
30:	ld	r9,VCPU_KVM(r4)		/* pointer to struct kvm */

	/* first take native_tlbie_lock */
	.section ".toc","aw"
toc_tlbie_lock:
	.tc	native_tlbie_lock[TC],native_tlbie_lock
	.previous
	ld	r3,toc_tlbie_lock@toc(2)
	lwz	r8,PACA_LOCK_TOKEN(r13)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_LPCR(r9)		/* use kvm->arch.lpcr to store HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* Take the guest's tlbie_lock */
	addi	r3,r9,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync
	ld	r6,KVM_SDR1(r9)
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */

	/* Set up HID4 with the guest's LPID etc. */
	sync
	mtspr	SPRN_HID4,r7
	isync

	/* drop the guest's tlbie_lock */
	li	r0,0
	stw	r0,0(r3)

	/* Check if HDEC expires soon */
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,10
	li	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	mr	r9,r4
	blt	hdec_soon

	/* Enable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,1
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* Load up guest SLB entries */
31:	lwz	r5,VCPU_SLB_MAX(r4)
	cmpwi	r5,0
	beq	9f
	mtctr	r5
	addi	r6,r4,VCPU_SLB
1:	ld	r8,VCPU_SLB_E(r6)
	ld	r9,VCPU_SLB_V(r6)
	slbmte	r9,r8
	addi	r6,r6,VCPU_SLB_SIZE
	bdnz	1b
9:

	/* Restore state of CTRL run bit; assume 1 on entry */
	lwz	r5,VCPU_CTRL(r4)
	andi.	r5,r5,1
	bne	4f
	mfspr	r6,SPRN_CTRLF
	clrrdi	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	ld	r6, VCPU_CTR(r4)
	lwz	r7, VCPU_XER(r4)

	mtctr	r6
	mtxer	r7

kvmppc_cede_reentry:		/* r4 = vcpu, r13 = paca */
	ld	r6, VCPU_SRR0(r4)
	ld	r7, VCPU_SRR1(r4)
	ld	r10, VCPU_PC(r4)
	ld	r11, VCPU_MSR(r4)	/* r11 = vcpu->arch.msr & ~MSR_HV */

	rldicl	r11, r11, 63 - MSR_HV_LG, 1
	rotldi	r11, r11, 1 + MSR_HV_LG
	ori	r11, r11, MSR_ME

	/* Check if we can deliver an external or decrementer interrupt now */
	ld	r0,VCPU_PENDING_EXC(r4)
	li	r8,(1 << BOOK3S_IRQPRIO_EXTERNAL)
	oris	r8,r8,(1 << BOOK3S_IRQPRIO_EXTERNAL_LEVEL)@h
	and	r0,r0,r8
	cmpdi	cr1,r0,0
	andi.	r0,r11,MSR_EE
	beq	cr1,11f
BEGIN_FTR_SECTION
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8
	isync
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	beq	5f
	li	r0,BOOK3S_INTERRUPT_EXTERNAL
12:	mr	r6,r10
	mr	r10,r0
	mr	r7,r11
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	5f
11:	beq	5f
	mfspr	r0,SPRN_DEC
	cmpwi	r0,0
	li	r0,BOOK3S_INTERRUPT_DECREMENTER
	blt	12b
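	/*
	 * Illustrative C for the delivery decision above:
	 *	if (external_pending(vcpu)) {
	 *		lpcr |= LPCR_MER;	// POWER7 mediated interrupt
	 *		if (msr & MSR_EE)
	 *			deliver(BOOK3S_INTERRUPT_EXTERNAL);
	 *	} else if ((msr & MSR_EE) && (int)mfspr(SPRN_DEC) < 0)
	 *		deliver(BOOK3S_INTERRUPT_DECREMENTER);
	 * where deliver() redirects the guest to the interrupt vector
	 * with SRR0/SRR1 holding the old PC/MSR.
	 */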

	/* Move SRR0 and SRR1 into the respective regs */
5:	mtspr	SPRN_SRR0, r6
	mtspr	SPRN_SRR1, r7
	li	r0,0
	stb	r0,VCPU_CEDED(r4)	/* cancel cede */

fast_guest_return:
	mtspr	SPRN_HSRR0,r10
	mtspr	SPRN_HSRR1,r11

	/* Activate guest mode, so faults get handled by KVM */
	li	r9, KVM_GUEST_MODE_GUEST
	stb	r9, HSTATE_IN_GUEST(r13)

	/* Enter guest */

BEGIN_FTR_SECTION
	ld	r5, VCPU_CFAR(r4)
	mtspr	SPRN_CFAR, r5
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	ld	r5, VCPU_LR(r4)
	lwz	r6, VCPU_CR(r4)
	mtlr	r5
	mtcr	r6

	ld	r0, VCPU_GPR(R0)(r4)
	ld	r1, VCPU_GPR(R1)(r4)
	ld	r2, VCPU_GPR(R2)(r4)
	ld	r3, VCPU_GPR(R3)(r4)
	ld	r5, VCPU_GPR(R5)(r4)
	ld	r6, VCPU_GPR(R6)(r4)
	ld	r7, VCPU_GPR(R7)(r4)
	ld	r8, VCPU_GPR(R8)(r4)
	ld	r9, VCPU_GPR(R9)(r4)
	ld	r10, VCPU_GPR(R10)(r4)
	ld	r11, VCPU_GPR(R11)(r4)
	ld	r12, VCPU_GPR(R12)(r4)
	ld	r13, VCPU_GPR(R13)(r4)

	ld	r4, VCPU_GPR(R4)(r4)

	hrfid
	b	.
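	/*
	 * The hrfid drops into the guest with MSR taken from HSRR1 and
	 * PC from HSRR0; the next host code to run on this thread is
	 * kvmppc_interrupt below, entered via the first-level interrupt
	 * handlers.
	 */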

/******************************************************************************
 *                                                                            *
 *                               Exit code                                    *
 *                                                                            *
 *****************************************************************************/

/*
 * We come here from the first-level interrupt handlers.
 */
	.globl	kvmppc_interrupt
kvmppc_interrupt:
	/*
	 * Register contents:
	 * R12		= interrupt vector
	 * R13		= PACA
	 * guest CR, R12 saved in shadow VCPU SCRATCH1/0
	 * guest R13 saved in SPRN_SCRATCH0
	 */
	/* abuse host_r2 as third scratch area; we get r2 from PACATOC(r13) */
	std	r9, HSTATE_HOST_R2(r13)
	ld	r9, HSTATE_KVM_VCPU(r13)

	/* Save registers */

	std	r0, VCPU_GPR(R0)(r9)
	std	r1, VCPU_GPR(R1)(r9)
	std	r2, VCPU_GPR(R2)(r9)
	std	r3, VCPU_GPR(R3)(r9)
	std	r4, VCPU_GPR(R4)(r9)
	std	r5, VCPU_GPR(R5)(r9)
	std	r6, VCPU_GPR(R6)(r9)
	std	r7, VCPU_GPR(R7)(r9)
	std	r8, VCPU_GPR(R8)(r9)
	ld	r0, HSTATE_HOST_R2(r13)
	std	r0, VCPU_GPR(R9)(r9)
	std	r10, VCPU_GPR(R10)(r9)
	std	r11, VCPU_GPR(R11)(r9)
	ld	r3, HSTATE_SCRATCH0(r13)
	lwz	r4, HSTATE_SCRATCH1(r13)
	std	r3, VCPU_GPR(R12)(r9)
	stw	r4, VCPU_CR(r9)
BEGIN_FTR_SECTION
	ld	r3, HSTATE_CFAR(r13)
	std	r3, VCPU_CFAR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_CFAR)

	/* Restore R1/R2 so we can handle faults */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	mfspr	r10, SPRN_SRR0
	mfspr	r11, SPRN_SRR1
	std	r10, VCPU_SRR0(r9)
	std	r11, VCPU_SRR1(r9)
	andi.	r0, r12, 2		/* need to read HSRR0/1? */
	beq	1f
	mfspr	r10, SPRN_HSRR0
	mfspr	r11, SPRN_HSRR1
	clrrdi	r12, r12, 2
1:	std	r10, VCPU_PC(r9)
	std	r11, VCPU_MSR(r9)

	GET_SCRATCH0(r3)
	mflr	r4
	std	r3, VCPU_GPR(R13)(r9)
	std	r4, VCPU_LR(r9)

	/* Unset guest mode */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)

	stw	r12,VCPU_TRAP(r9)

	/* Save HEIR (HV emulation assist reg) in last_inst
	   if this is an HEI (HV emulation interrupt, e40) */
	li	r3,KVM_INST_FETCH_FAILED
BEGIN_FTR_SECTION
	cmpwi	r12,BOOK3S_INTERRUPT_H_EMUL_ASSIST
	bne	11f
	mfspr	r3,SPRN_HEIR
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
11:	stw	r3,VCPU_LAST_INST(r9)

	/* these are volatile across C function calls */
	mfctr	r3
	mfxer	r4
	std	r3, VCPU_CTR(r9)
	stw	r4, VCPU_XER(r9)

BEGIN_FTR_SECTION
	/* If this is a page table miss then see if it's theirs or ours */
	cmpwi	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	kvmppc_hdsi
	cmpwi	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	beq	kvmppc_hisi
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* See if this is a leftover HDEC interrupt */
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	bne	2f
	mfspr	r3,SPRN_HDEC
	cmpwi	r3,0
	bge	ignore_hdec
2:
	/* See if this is an hcall we can handle in real mode */
	cmpwi	r12,BOOK3S_INTERRUPT_SYSCALL
	beq	hcall_try_real_mode

	/* Only handle external interrupts here on arch 206 and later */
BEGIN_FTR_SECTION
	b	ext_interrupt_to_host
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/* External interrupt ? */
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
	bne+	ext_interrupt_to_host

	/* External interrupt, first check for host_ipi. If this is
	 * set, we know the host wants us out so let's do it now
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne	ext_interrupt_to_host

	/* Now read the interrupt from the ICP */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	cmpdi	r5, 0
	beq-	ext_interrupt_to_host
	lwzcix	r3, r5, r7
	rlwinm.	r0, r3, 0, 0xffffff
	sync
	bne	1f

	/* Nothing pending in the ICP, check for mediated interrupts
	 * and bounce it to the guest
	 */
	andi.	r0, r11, MSR_EE
	beq	ext_interrupt_to_host	/* shouldn't happen ?? */
	mfspr	r5, SPRN_LPCR
	andi.	r0, r5, LPCR_MER
	bne	bounce_ext_interrupt
	b	ext_interrupt_to_host	/* shouldn't happen ?? */

1:	/* We found something in the ICP...
	 *
	 * If it's not an IPI, stash it in the PACA and return to
	 * the host, we don't (yet) handle directing real external
	 * interrupts directly to the guest
	 */
	cmpwi	r0, XICS_IPI
	bne	ext_stash_for_host

	/* It's an IPI, clear the MFRR and EOI it */
	li	r0, 0xff
	li	r6, XICS_MFRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
	sync

	/* We need to re-check host IPI now in case it got set in the
	 * meantime. If it's clear, we bounce the interrupt to the
	 * guest
	 */
	lbz	r0, HSTATE_HOST_IPI(r13)
	cmpwi	r0, 0
	bne-	1f

	/* All right, looks like an IPI for the guest, we need to set MER */
	mfspr	r8,SPRN_LPCR
	ori	r8,r8,LPCR_MER
	mtspr	SPRN_LPCR,r8

	/* And if the guest EE is set, we can deliver immediately, else
	 * we return to the guest with MER set
	 */
	andi.	r0, r11, MSR_EE
	bne	bounce_ext_interrupt
	mr	r4, r9
	b	fast_guest_return

	/* We raced with the host, we need to resend that IPI, bummer */
1:	li	r0, IPI_PRIORITY
	stbcix	r0, r5, r6		/* set the IPI */
	sync
	b	ext_interrupt_to_host

ext_stash_for_host:
	/* It's not an IPI and it's for the host, stash it in the PACA
	 * before exit, it will be picked up by the host ICP driver
	 */
	stw	r3, HSTATE_SAVED_XIRR(r13)
ext_interrupt_to_host:

guest_exit_cont:		/* r9 = vcpu, r12 = trap, r13 = paca */
	/* Save DEC */
	mfspr	r5,SPRN_DEC
	mftb	r6
	extsw	r5,r5
	add	r5,r5,r6
	std	r5,VCPU_DEC_EXPIRES(r9)
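	/*
	 * i.e. vcpu->arch.dec_expires = mftb() + (sign-extended DEC):
	 * the timebase value at which the guest decrementer would reach
	 * zero.
	 */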

	/* Save more register state */
	mfdar	r6
	mfdsisr	r7
	std	r6, VCPU_DAR(r9)
	stw	r7, VCPU_DSISR(r9)
BEGIN_FTR_SECTION
	/* don't overwrite fault_dar/fault_dsisr if HDSI */
	cmpwi	r12,BOOK3S_INTERRUPT_H_DATA_STORAGE
	beq	6f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	std	r6, VCPU_FAULT_DAR(r9)
	stw	r7, VCPU_FAULT_DSISR(r9)

	/* See if it is a machine check */
	cmpwi	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	machine_check_realmode
mc_cont:

	/* Save guest CTRL register, set runlatch to 1 */
6:	mfspr	r6,SPRN_CTRLF
	stw	r6,VCPU_CTRL(r9)
	andi.	r0,r6,1
	bne	4f
	ori	r6,r6,1
	mtspr	SPRN_CTRLT,r6
4:
	/* Read the guest SLB and save it away */
	lwz	r0,VCPU_SLB_NR(r9)	/* number of entries in SLB */
	mtctr	r0
	li	r6,0
	addi	r7,r9,VCPU_SLB
	li	r5,0
1:	slbmfee	r8,r6
	andis.	r0,r8,SLB_ESID_V@h
	beq	2f
	add	r8,r8,r6		/* put index in */
	slbmfev	r3,r6
	std	r8,VCPU_SLB_E(r7)
	std	r3,VCPU_SLB_V(r7)
	addi	r7,r7,VCPU_SLB_SIZE
	addi	r5,r5,1
2:	addi	r6,r6,1
	bdnz	1b
	stw	r5,VCPU_SLB_MAX(r9)

	/*
	 * Save the guest PURR/SPURR
	 */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_PURR
	mfspr	r6,SPRN_SPURR
	ld	r7,VCPU_PURR(r9)
	ld	r8,VCPU_SPURR(r9)
	std	r5,VCPU_PURR(r9)
	std	r6,VCPU_SPURR(r9)
	subf	r5,r7,r5
	subf	r6,r8,r6

	/*
	 * Restore host PURR/SPURR and add guest times
	 * so that the time in the guest gets accounted.
	 */
	ld	r3,HSTATE_PURR(r13)
	ld	r4,HSTATE_SPURR(r13)
	add	r3,r3,r5
	add	r4,r4,r6
	mtspr	SPRN_PURR,r3
	mtspr	SPRN_SPURR,r4
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_201)

	/* Clear out SLB */
	li	r5,0
	slbmte	r5,r5
	slbia
	ptesync

hdec_soon:			/* r9 = vcpu, r12 = trap, r13 = paca */
BEGIN_FTR_SECTION
	b	32f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	/*
	 * POWER7 guest -> host partition switch code.
	 * We don't have to lock against tlbies but we do
	 * have to coordinate the hardware threads.
	 */
	/* Increment the threads-exiting-guest count in the 0xff00
	   bits of vcore->entry_exit_count */
	lwsync
	ld	r5,HSTATE_KVM_VCORE(r13)
	addi	r6,r5,VCORE_ENTRY_EXIT
41:	lwarx	r3,0,r6
	addi	r0,r3,0x100
	stwcx.	r0,0,r6
	bne	41b
	lwsync

	/*
	 * At this point we have an interrupt that we have to pass
	 * up to the kernel or qemu; we can't handle it in real mode.
	 * Thus we have to do a partition switch, so we have to
	 * collect the other threads, if we are the first thread
	 * to take an interrupt.  To do this, we set the HDEC to 0,
	 * which causes an HDEC interrupt in all threads within 2ns
	 * because the HDEC register is shared between all 4 threads.
	 * However, we don't need to bother if this is an HDEC
	 * interrupt, since the other threads will already be on their
	 * way here in that case.
	 */
	cmpwi	r3,0x100	/* Are we the first here? */
	bge	43f
	cmpwi	r3,1		/* Are any other threads in the guest? */
	ble	43f
	cmpwi	r12,BOOK3S_INTERRUPT_HV_DECREMENTER
	beq	40f
	li	r0,0
	mtspr	SPRN_HDEC,r0
40:
	/*
	 * Send an IPI to any napping threads, since an HDEC interrupt
	 * doesn't wake CPUs up from nap.
	 */
	lwz	r3,VCORE_NAPPING_THREADS(r5)
	lwz	r4,VCPU_PTID(r9)
	li	r0,1
	sld	r0,r0,r4
	andc.	r3,r3,r0		/* no sense IPI'ing ourselves */
	beq	43f
	mulli	r4,r4,PACA_SIZE		/* get paca for thread 0 */
	subf	r6,r4,r13
42:	andi.	r0,r3,1
	beq	44f
	ld	r8,HSTATE_XICS_PHYS(r6)	/* get thread's XICS reg addr */
	li	r0,IPI_PRIORITY
	li	r7,XICS_MFRR
	stbcix	r0,r7,r8		/* trigger the IPI */
44:	srdi.	r3,r3,1
	addi	r6,r6,PACA_SIZE
	bne	42b
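	/*
	 * Illustrative C for the IPI loop above: starting from the
	 * thread-0 paca, walk the napping_threads bitmap (our own bit
	 * already cleared) and poke each napping thread's XICS MFRR:
	 *	for (paca = thread0_paca; mask; mask >>= 1, paca++)
	 *		if (mask & 1)
	 *			out8(paca->xics_phys + XICS_MFRR,
	 *			     IPI_PRIORITY);
	 * (out8 here is shorthand for the cache-inhibited byte store.)
	 */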

	/* Secondary threads wait for primary to do partition switch */
43:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r9)
	cmpwi	r3,0
	beq	15f
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	b	16f

	/* Primary thread waits for all the secondaries to exit guest */
15:	lwz	r3,VCORE_ENTRY_EXIT(r5)
	srwi	r0,r3,8
	clrldi	r3,r3,56
	cmpw	r3,r0
	bne	15b
	isync

	/* Primary thread switches back to host partition */
	ld	r6,KVM_HOST_SDR1(r4)
	lwz	r7,KVM_HOST_LPID(r4)
	li	r8,LPID_RSVD		/* switch to reserved LPID */
	mtspr	SPRN_LPID,r8
	ptesync
	mtspr	SPRN_SDR1,r6		/* switch to partition page table */
	mtspr	SPRN_LPID,r7
	isync
	li	r0,0
	stb	r0,VCORE_IN_GUEST(r5)
	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

16:	ld	r8,KVM_HOST_LPCR(r4)
	mtspr	SPRN_LPCR,r8
	isync
	b	33f

	/*
	 * PPC970 guest -> host partition switch code.
	 * We have to lock against concurrent tlbies, and
	 * we have to flush the whole TLB.
	 */
32:	ld	r4,VCPU_KVM(r9)		/* pointer to struct kvm */

	/* Take the guest's tlbie_lock */
	lwz	r8,PACA_LOCK_TOKEN(r13)
	addi	r3,r4,KVM_TLBIE_LOCK
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r7,KVM_HOST_LPCR(r4)	/* use kvm->arch.host_lpcr for HID4 */
	li	r0,0x18f
	rotldi	r0,r0,HID4_LPID5_SH	/* all lpid bits in HID4 = 1 */
	or	r0,r7,r0
	ptesync
	sync
	mtspr	SPRN_HID4,r0		/* switch to reserved LPID */
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop guest tlbie_lock */

	/* invalidate the whole TLB */
	li	r0,256
	mtctr	r0
	li	r6,0
25:	tlbiel	r6
	addi	r6,r6,0x1000
	bdnz	25b
	ptesync

	/* take native_tlbie_lock */
	ld	r3,toc_tlbie_lock@toc(2)
24:	lwarx	r0,0,r3
	cmpwi	r0,0
	bne	24b
	stwcx.	r8,0,r3
	bne	24b
	isync

	ld	r6,KVM_HOST_SDR1(r4)
	mtspr	SPRN_SDR1,r6		/* switch to host page table */

	/* Set up host HID4 value */
	sync
	mtspr	SPRN_HID4,r7
	isync
	li	r0,0
	stw	r0,0(r3)		/* drop native_tlbie_lock */

	lis	r8,0x7fff		/* MAX_INT@h */
	mtspr	SPRN_HDEC,r8

	/* Disable HDEC interrupts */
	mfspr	r0,SPRN_HID0
	li	r3,0
	rldimi	r0,r3, HID0_HDICE_SH, 64-HID0_HDICE_SH-1
	sync
	mtspr	SPRN_HID0,r0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0
	mfspr	r0,SPRN_HID0

	/* load host SLB entries */
33:	ld	r8,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r8)
	ld	r6,SLBSHADOW_SAVEAREA+8(r8)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r8,r8,16
	.endr

	/* Save and reset AMR and UAMOR before turning on the MMU */
BEGIN_FTR_SECTION
	mfspr	r5,SPRN_AMR
	mfspr	r6,SPRN_UAMOR
	std	r5,VCPU_AMR(r9)
	std	r6,VCPU_UAMOR(r9)
	li	r6,0
	mtspr	SPRN_AMR,r6
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Switch DSCR back to host value */
BEGIN_FTR_SECTION
	mfspr	r8, SPRN_DSCR
	ld	r7, HSTATE_DSCR(r13)
	std	r8, VCPU_DSCR(r9)	/* r9 = vcpu; r7 holds the host value */
	mtspr	SPRN_DSCR, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r9)
	std	r15, VCPU_GPR(R15)(r9)
	std	r16, VCPU_GPR(R16)(r9)
	std	r17, VCPU_GPR(R17)(r9)
	std	r18, VCPU_GPR(R18)(r9)
	std	r19, VCPU_GPR(R19)(r9)
	std	r20, VCPU_GPR(R20)(r9)
	std	r21, VCPU_GPR(R21)(r9)
	std	r22, VCPU_GPR(R22)(r9)
	std	r23, VCPU_GPR(R23)(r9)
	std	r24, VCPU_GPR(R24)(r9)
	std	r25, VCPU_GPR(R25)(r9)
	std	r26, VCPU_GPR(R26)(r9)
	std	r27, VCPU_GPR(R27)(r9)
	std	r28, VCPU_GPR(R28)(r9)
	std	r29, VCPU_GPR(R29)(r9)
	std	r30, VCPU_GPR(R30)(r9)
	std	r31, VCPU_GPR(R31)(r9)

	/* Save SPRGs */
	mfspr	r3, SPRN_SPRG0
	mfspr	r4, SPRN_SPRG1
	mfspr	r5, SPRN_SPRG2
	mfspr	r6, SPRN_SPRG3
	std	r3, VCPU_SPRG0(r9)
	std	r4, VCPU_SPRG1(r9)
	std	r5, VCPU_SPRG2(r9)
	std	r6, VCPU_SPRG3(r9)

	/* save FP state */
	mr	r3, r9
	bl	.kvmppc_save_fp

	/* Increment yield count if they have a VPA */
	ld	r8, VCPU_VPA(r9)	/* do they have a VPA? */
	cmpdi	r8, 0
	beq	25f
	lwz	r3, LPPACA_YIELDCOUNT(r8)
	addi	r3, r3, 1
	stw	r3, LPPACA_YIELDCOUNT(r8)
	li	r3, 1
	stb	r3, VCPU_VPA_DIRTY(r9)
25:
	/* Save PMU registers if requested */
	/* r8 and cr0.eq are live here */
	li	r3, 1
	sldi	r3, r3, 31		/* MMCR0_FC (freeze counters) bit */
	mfspr	r4, SPRN_MMCR0		/* save MMCR0 */
	mtspr	SPRN_MMCR0, r3		/* freeze all counters, disable ints */
	mfspr	r6, SPRN_MMCRA
BEGIN_FTR_SECTION
	/* On P7, clear MMCRA in order to disable SDAR updates */
	li	r7, 0
	mtspr	SPRN_MMCRA, r7
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)
	isync
	beq	21f			/* if no VPA, save PMU stuff anyway */
	lbz	r7, LPPACA_PMCINUSE(r8)
	cmpwi	r7, 0			/* did they ask for PMU stuff to be saved? */
	bne	21f
	std	r3, VCPU_MMCR(r9)	/* if not, set saved MMCR0 to FC */
	b	22f
21:	mfspr	r5, SPRN_MMCR1
	std	r4, VCPU_MMCR(r9)
	std	r5, VCPU_MMCR + 8(r9)
	std	r6, VCPU_MMCR + 16(r9)
	mfspr	r3, SPRN_PMC1
	mfspr	r4, SPRN_PMC2
	mfspr	r5, SPRN_PMC3
	mfspr	r6, SPRN_PMC4
	mfspr	r7, SPRN_PMC5
	mfspr	r8, SPRN_PMC6
BEGIN_FTR_SECTION
	mfspr	r10, SPRN_PMC7
	mfspr	r11, SPRN_PMC8
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	stw	r3, VCPU_PMC(r9)
	stw	r4, VCPU_PMC + 4(r9)
	stw	r5, VCPU_PMC + 8(r9)
	stw	r6, VCPU_PMC + 12(r9)
	stw	r7, VCPU_PMC + 16(r9)
	stw	r8, VCPU_PMC + 20(r9)
BEGIN_FTR_SECTION
	stw	r10, VCPU_PMC + 24(r9)
	stw	r11, VCPU_PMC + 28(r9)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
22:

	/* Secondary threads go off to take a nap on POWER7 */
BEGIN_FTR_SECTION
	lwz	r0,VCPU_PTID(r9)
	cmpwi	r0,0
	bne	secondary_nap
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* Restore host DABR and DABRX */
	ld	r5,HSTATE_DABR(r13)
	li	r6,7
	mtspr	SPRN_DABR,r5
	mtspr	SPRN_DABRX,r6

	/* Restore SPRG3 */
	ld	r3,PACA_SPRG3(r13)
	mtspr	SPRN_SPRG3,r3

	/*
	 * Reload DEC.  HDEC interrupts were disabled when
	 * we reloaded the host's LPCR value.
	 */
	ld	r3, HSTATE_DECEXP(r13)
	mftb	r4
	subf	r4, r4, r3
	mtspr	SPRN_DEC, r4

	/* Reload the host's PMU registers */
	ld	r3, PACALPPACAPTR(r13)	/* is the host using the PMU? */
	lbz	r4, LPPACA_PMCINUSE(r3)
	cmpwi	r4, 0
	beq	23f			/* skip if not */
	lwz	r3, HSTATE_PMC(r13)
	lwz	r4, HSTATE_PMC + 4(r13)
	lwz	r5, HSTATE_PMC + 8(r13)
	lwz	r6, HSTATE_PMC + 12(r13)
	lwz	r8, HSTATE_PMC + 16(r13)
	lwz	r9, HSTATE_PMC + 20(r13)
BEGIN_FTR_SECTION
	lwz	r10, HSTATE_PMC + 24(r13)
	lwz	r11, HSTATE_PMC + 28(r13)
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	mtspr	SPRN_PMC1, r3
	mtspr	SPRN_PMC2, r4
	mtspr	SPRN_PMC3, r5
	mtspr	SPRN_PMC4, r6
	mtspr	SPRN_PMC5, r8
	mtspr	SPRN_PMC6, r9
BEGIN_FTR_SECTION
	mtspr	SPRN_PMC7, r10
	mtspr	SPRN_PMC8, r11
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_201)
	ld	r3, HSTATE_MMCR(r13)
	ld	r4, HSTATE_MMCR + 8(r13)
	ld	r5, HSTATE_MMCR + 16(r13)
	mtspr	SPRN_MMCR1, r4
	mtspr	SPRN_MMCRA, r5
	mtspr	SPRN_MMCR0, r3
	isync
23:
	/*
	 * For external and machine check interrupts, we need
	 * to call the Linux handler to process the interrupt.
	 * We do that by jumping to absolute address 0x500 for
	 * external interrupts, or the machine_check_fwnmi label
	 * for machine checks (since firmware might have patched
	 * the vector area at 0x200).  The [h]rfid at the end of the
	 * handler will return to the book3s_hv_interrupts.S code.
	 * For other interrupts we do the rfid to get back
	 * to the book3s_hv_interrupts.S code here.
	 */
	ld	r8, HSTATE_VMHANDLER(r13)
	ld	r7, HSTATE_HOST_MSR(r13)

	cmpwi	cr1, r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	cmpwi	r12, BOOK3S_INTERRUPT_EXTERNAL
BEGIN_FTR_SECTION
	beq	11f
END_FTR_SECTION_IFSET(CPU_FTR_ARCH_206)

	/* RFI into the highmem handler, or branch to interrupt handler */
	mfmsr	r6
	li	r0, MSR_RI
	andc	r6, r6, r0
	mtmsrd	r6, 1			/* Clear RI in MSR */
	mtsrr0	r8
	mtsrr1	r7
	beqa	0x500			/* external interrupt (PPC970) */
	beq	cr1, 13f		/* machine check */
	RFI

	/* On POWER7, we have external interrupts set to use HSRR0/1 */
11:	mtspr	SPRN_HSRR0, r8
	mtspr	SPRN_HSRR1, r7
	ba	0x500

13:	b	machine_check_fwnmi

/*
 * Check whether an HDSI is an HPTE not found fault or something else.
 * If it is an HPTE not found fault that is due to the guest accessing
 * a page that they have mapped but which we have paged out, then
 * we continue on with the guest exit path.  In all other cases,
 * reflect the HDSI to the guest as a DSI.
 */
kvmppc_hdsi:
	mfspr	r4, SPRN_HDAR
	mfspr	r6, SPRN_HDSISR
	/* HPTE not found fault or protection fault? */
	andis.	r0, r6, (DSISR_NOHPTE | DSISR_PROTFAULT)@h
	beq	1f			/* if not, send it to the guest */
	andi.	r0, r11, MSR_DR		/* data relocation enabled? */
	beq	3f
	clrrdi	r0, r4, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:	std	r4, VCPU_FAULT_DAR(r9)
	stw	r6, VCPU_FAULT_DSISR(r9)

	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	li	r7, 1			/* data fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_DATA_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	6f
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont
	cmpdi	r3, -2			/* MMIO emulation; need instr word */
	beq	2f

	/* Synthesize a DSI for the guest */
	ld	r4, VCPU_FAULT_DAR(r9)
	mr	r6, r3
1:	mtspr	SPRN_DAR, r4
	mtspr	SPRN_DSISR, r6
	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_DATA_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
fast_interrupt_c_return:
6:	ld	r7, VCPU_CTR(r9)
	lwz	r8, VCPU_XER(r9)
	mtctr	r7
	mtxer	r8
	mr	r4, r9
	b	fast_guest_return

3:	ld	r5, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r5)
	b	4b

	/* If this is for emulated MMIO, load the instruction word */
2:	li	r8, KVM_INST_FETCH_FAILED	/* In case lwz faults */

	/* Set guest mode to 'jump over instruction' so if lwz faults
	 * we'll just continue at the next IP. */
	li	r0, KVM_GUEST_MODE_SKIP
	stb	r0, HSTATE_IN_GUEST(r13)

	/* Do the access with MSR:DR enabled */
	mfmsr	r3
	ori	r4, r3, MSR_DR		/* Enable paging for data */
	mtmsrd	r4
	lwz	r8, 0(r10)
	mtmsrd	r3

	/* Store the result */
	stw	r8, VCPU_LAST_INST(r9)

	/* Unset guest mode. */
	li	r0, KVM_GUEST_MODE_NONE
	stb	r0, HSTATE_IN_GUEST(r13)
	b	guest_exit_cont

/*
 * Similarly for an HISI, reflect it to the guest as an ISI unless
 * it is an HPTE not found fault for a page that we have paged out.
 */
kvmppc_hisi:
	andis.	r0, r11, SRR1_ISI_NOPT@h
	beq	1f
	andi.	r0, r11, MSR_IR		/* instruction relocation enabled? */
	beq	3f
	clrrdi	r0, r10, 28
	PPC_SLBFEE_DOT(R5, R0)		/* if so, look up SLB */
	bne	1f			/* if no SLB entry found */
4:
	/* Search the hash table. */
	mr	r3, r9			/* vcpu pointer */
	mr	r4, r10
	mr	r6, r11
	li	r7, 0			/* instruction fault */
	bl	.kvmppc_hpte_hv_fault
	ld	r9, HSTATE_KVM_VCPU(r13)
	ld	r10, VCPU_PC(r9)
	ld	r11, VCPU_MSR(r9)
	li	r12, BOOK3S_INTERRUPT_H_INST_STORAGE
	cmpdi	r3, 0			/* retry the instruction */
	beq	fast_interrupt_c_return
	cmpdi	r3, -1			/* handle in kernel mode */
	beq	guest_exit_cont

	/* Synthesize an ISI for the guest */
	mr	r11, r3
1:	mtspr	SPRN_SRR0, r10
	mtspr	SPRN_SRR1, r11
	li	r10, BOOK3S_INTERRUPT_INST_STORAGE
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

3:	ld	r6, VCPU_KVM(r9)	/* not relocated, use VRMA */
	ld	r5, KVM_VRMA_SLB_V(r6)
	b	4b

/*
 * Try to handle an hcall in real mode.
 * Returns to the guest if we handle it, or continues on up to
 * the kernel if we can't (i.e. if we don't have a handler for
 * it, or if the handler returns H_TOO_HARD).
 */
	.globl	hcall_try_real_mode
hcall_try_real_mode:
	ld	r3,VCPU_GPR(R3)(r9)
	andi.	r0,r11,MSR_PR
	bne	guest_exit_cont
	clrrdi	r3,r3,2
	cmpldi	r3,hcall_real_table_end - hcall_real_table
	bge	guest_exit_cont
	LOAD_REG_ADDR(r4, hcall_real_table)
	lwzx	r3,r3,r4
	cmpwi	r3,0
	beq	guest_exit_cont
	add	r3,r3,r4
	mtctr	r3
	mr	r3,r9		/* get vcpu pointer */
	ld	r4,VCPU_GPR(R4)(r9)
	bctrl
	cmpdi	r3,H_TOO_HARD
	beq	hcall_real_fallback
	ld	r4,HSTATE_KVM_VCPU(r13)
	std	r3,VCPU_GPR(R3)(r4)
	ld	r10,VCPU_PC(r4)
	ld	r11,VCPU_MSR(r4)
	b	fast_guest_return
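	/*
	 * Dispatch note: hcall numbers are multiples of 4, so the
	 * (low-bits-cleared) hcall number is used directly as a byte
	 * offset into the table of .long entries below; a zero entry
	 * means there is no real-mode handler for that hcall.
	 */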

	/* We've attempted a real mode hcall, but it's been punted back
	 * to userspace.  We need to restore some clobbered volatiles
	 * before resuming the pass-it-to-qemu path */
hcall_real_fallback:
	li	r12,BOOK3S_INTERRUPT_SYSCALL
	ld	r9, HSTATE_KVM_VCPU(r13)

	b	guest_exit_cont

	.globl	hcall_real_table
hcall_real_table:
	.long	0		/* 0 - unused */
	.long	.kvmppc_h_remove - hcall_real_table
	.long	.kvmppc_h_enter - hcall_real_table
	.long	.kvmppc_h_read - hcall_real_table
	.long	0		/* 0x10 - H_CLEAR_MOD */
	.long	0		/* 0x14 - H_CLEAR_REF */
	.long	.kvmppc_h_protect - hcall_real_table
	.long	0		/* 0x1c - H_GET_TCE */
	.long	.kvmppc_h_put_tce - hcall_real_table
	.long	0		/* 0x24 - H_SET_SPRG0 */
	.long	.kvmppc_h_set_dabr - hcall_real_table
	.long	0		/* 0x2c */
	.long	0		/* 0x30 */
	.long	0		/* 0x34 */
	.long	0		/* 0x38 */
	.long	0		/* 0x3c */
	.long	0		/* 0x40 */
	.long	0		/* 0x44 */
	.long	0		/* 0x48 */
	.long	0		/* 0x4c */
	.long	0		/* 0x50 */
	.long	0		/* 0x54 */
	.long	0		/* 0x58 */
	.long	0		/* 0x5c */
	.long	0		/* 0x60 */
	.long	0		/* 0x64 */
	.long	0		/* 0x68 */
	.long	0		/* 0x6c */
	.long	0		/* 0x70 */
	.long	0		/* 0x74 */
	.long	0		/* 0x78 */
	.long	0		/* 0x7c */
	.long	0		/* 0x80 */
	.long	0		/* 0x84 */
	.long	0		/* 0x88 */
	.long	0		/* 0x8c */
	.long	0		/* 0x90 */
	.long	0		/* 0x94 */
	.long	0		/* 0x98 */
	.long	0		/* 0x9c */
	.long	0		/* 0xa0 */
	.long	0		/* 0xa4 */
	.long	0		/* 0xa8 */
	.long	0		/* 0xac */
	.long	0		/* 0xb0 */
	.long	0		/* 0xb4 */
	.long	0		/* 0xb8 */
	.long	0		/* 0xbc */
	.long	0		/* 0xc0 */
	.long	0		/* 0xc4 */
	.long	0		/* 0xc8 */
	.long	0		/* 0xcc */
	.long	0		/* 0xd0 */
	.long	0		/* 0xd4 */
	.long	0		/* 0xd8 */
	.long	0		/* 0xdc */
	.long	.kvmppc_h_cede - hcall_real_table
	.long	0		/* 0xe4 */
	.long	0		/* 0xe8 */
	.long	0		/* 0xec */
	.long	0		/* 0xf0 */
	.long	0		/* 0xf4 */
	.long	0		/* 0xf8 */
	.long	0		/* 0xfc */
	.long	0		/* 0x100 */
	.long	0		/* 0x104 */
	.long	0		/* 0x108 */
	.long	0		/* 0x10c */
	.long	0		/* 0x110 */
	.long	0		/* 0x114 */
	.long	0		/* 0x118 */
	.long	0		/* 0x11c */
	.long	0		/* 0x120 */
	.long	.kvmppc_h_bulk_remove - hcall_real_table
hcall_real_table_end:

ignore_hdec:
	mr	r4,r9
	b	fast_guest_return

bounce_ext_interrupt:
	mr	r4,r9
	mtspr	SPRN_SRR0,r10
	mtspr	SPRN_SRR1,r11
	li	r10,BOOK3S_INTERRUPT_EXTERNAL
	li	r11,(MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11,r11,63
	b	fast_guest_return

_GLOBAL(kvmppc_h_set_dabr)
	std	r4,VCPU_DABR(r3)
	/* Work around P7 bug where DABR can get corrupted on mtspr */
1:	mtspr	SPRN_DABR,r4
	mfspr	r5, SPRN_DABR
	cmpd	r4, r5
	bne	1b
	isync
	li	r3,0
	blr

_GLOBAL(kvmppc_h_cede)
	ori	r11,r11,MSR_EE
	std	r11,VCPU_MSR(r3)
	li	r0,1
	stb	r0,VCPU_CEDED(r3)
	sync			/* order setting ceded vs. testing prodded */
	lbz	r5,VCPU_PRODDED(r3)
	cmpwi	r5,0
	bne	kvm_cede_prodded
	li	r0,0		/* set trap to 0 to say hcall is handled */
	stw	r0,VCPU_TRAP(r3)
	li	r0,H_SUCCESS
	std	r0,VCPU_GPR(R3)(r3)
BEGIN_FTR_SECTION
	b	kvm_cede_exit	/* just send it up to host on 970 */
END_FTR_SECTION_IFCLR(CPU_FTR_ARCH_206)

	/*
	 * Set our bit in the bitmask of napping threads unless all the
	 * other threads are already napping, in which case we send this
	 * up to the host.
	 */
	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r6,VCPU_PTID(r3)
	lwz	r8,VCORE_ENTRY_EXIT(r5)
	clrldi	r8,r8,56
	li	r0,1
	sld	r0,r0,r6
	addi	r6,r5,VCORE_NAPPING_THREADS
31:	lwarx	r4,0,r6
	or	r4,r4,r0
	PPC_POPCNTW(R7,R4)
	cmpw	r7,r8
	bge	kvm_cede_exit
	stwcx.	r4,0,r6
	bne	31b
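	/*
	 * Illustrative C for the napping-threads update above:
	 *	do {
	 *		new = vcore->napping_threads | (1 << ptid);
	 *		if (popcount(new) >= (entry_exit_count & 0xff))
	 *			return kvm_cede_exit();	// all others napping
	 *	} while (!store_conditional(&vcore->napping_threads, new));
	 */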
	li	r0,1
	stb	r0,HSTATE_NAPPING(r13)
	/* order napping_threads update vs testing entry_exit_count */
	lwsync
	mr	r4,r3
	lwz	r7,VCORE_ENTRY_EXIT(r5)
	cmpwi	r7,0x100
	bge	33f		/* another thread already exiting */

/*
 * Although not specifically required by the architecture, POWER7
 * preserves the following registers in nap mode, even if an SMT mode
 * switch occurs: SLB entries, PURR, SPURR, AMOR, UAMOR, AMR, SPRG0-3,
 * DAR, DSISR, DABR, DABRX, DSCR, PMCx, MMCRx, SIAR, SDAR.
 */
	/* Save non-volatile GPRs */
	std	r14, VCPU_GPR(R14)(r3)
	std	r15, VCPU_GPR(R15)(r3)
	std	r16, VCPU_GPR(R16)(r3)
	std	r17, VCPU_GPR(R17)(r3)
	std	r18, VCPU_GPR(R18)(r3)
	std	r19, VCPU_GPR(R19)(r3)
	std	r20, VCPU_GPR(R20)(r3)
	std	r21, VCPU_GPR(R21)(r3)
	std	r22, VCPU_GPR(R22)(r3)
	std	r23, VCPU_GPR(R23)(r3)
	std	r24, VCPU_GPR(R24)(r3)
	std	r25, VCPU_GPR(R25)(r3)
	std	r26, VCPU_GPR(R26)(r3)
	std	r27, VCPU_GPR(R27)(r3)
	std	r28, VCPU_GPR(R28)(r3)
	std	r29, VCPU_GPR(R29)(r3)
	std	r30, VCPU_GPR(R30)(r3)
	std	r31, VCPU_GPR(R31)(r3)

	/* save FP state */
	bl	.kvmppc_save_fp

	/*
	 * Take a nap until a decrementer or external interrupt occurs,
	 * with PECE1 (wake on decr) and PECE0 (wake on external) set in LPCR
	 */
	li	r0,1
	stb	r0,HSTATE_HWTHREAD_REQ(r13)
	mfspr	r5,SPRN_LPCR
	ori	r5,r5,LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR,r5
	isync
	li	r0, 0
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.
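	/*
	 * The store/ptesync/load/compare sequence above is the idiom
	 * used to drain outstanding stores before executing nap; the
	 * compare/branch simply consumes the reloaded value.
	 */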

kvm_end_cede:
	/* Woken by external or decrementer interrupt */
	ld	r1, HSTATE_HOST_R1(r13)

	/* load up FP state */
	bl	kvmppc_load_fp

	/* Load NV GPRS */
	ld	r14, VCPU_GPR(R14)(r4)
	ld	r15, VCPU_GPR(R15)(r4)
	ld	r16, VCPU_GPR(R16)(r4)
	ld	r17, VCPU_GPR(R17)(r4)
	ld	r18, VCPU_GPR(R18)(r4)
	ld	r19, VCPU_GPR(R19)(r4)
	ld	r20, VCPU_GPR(R20)(r4)
	ld	r21, VCPU_GPR(R21)(r4)
	ld	r22, VCPU_GPR(R22)(r4)
	ld	r23, VCPU_GPR(R23)(r4)
	ld	r24, VCPU_GPR(R24)(r4)
	ld	r25, VCPU_GPR(R25)(r4)
	ld	r26, VCPU_GPR(R26)(r4)
	ld	r27, VCPU_GPR(R27)(r4)
	ld	r28, VCPU_GPR(R28)(r4)
	ld	r29, VCPU_GPR(R29)(r4)
	ld	r30, VCPU_GPR(R30)(r4)
	ld	r31, VCPU_GPR(R31)(r4)

	/* clear our bit in vcore->napping_threads */
33:	ld	r5,HSTATE_KVM_VCORE(r13)
	lwz	r3,VCPU_PTID(r4)
	li	r0,1
	sld	r0,r0,r3
	addi	r6,r5,VCORE_NAPPING_THREADS
32:	lwarx	r7,0,r6
	andc	r7,r7,r0
	stwcx.	r7,0,r6
	bne	32b
	li	r0,0
	stb	r0,HSTATE_NAPPING(r13)

	/* see if any other thread is already exiting */
	lwz	r0,VCORE_ENTRY_EXIT(r5)
	cmpwi	r0,0x100
	blt	kvmppc_cede_reentry	/* if not go back to guest */

	/* some threads are exiting, so go to the guest exit path */
	b	hcall_real_fallback

	/* cede when already previously prodded case */
kvm_cede_prodded:
	li	r0,0
	stb	r0,VCPU_PRODDED(r3)
	sync			/* order testing prodded vs. clearing ceded */
	stb	r0,VCPU_CEDED(r3)
	li	r3,H_SUCCESS
	blr

	/* we've ceded but we want to give control to the host */
kvm_cede_exit:
	li	r3,H_TOO_HARD
	blr

	/* Try to handle a machine check in real mode */
machine_check_realmode:
	mr	r3, r9		/* get vcpu pointer */
	bl	.kvmppc_realmode_machine_check
	nop
	cmpdi	r3, 0		/* continue exiting from guest? */
	ld	r9, HSTATE_KVM_VCPU(r13)
	li	r12, BOOK3S_INTERRUPT_MACHINE_CHECK
	beq	mc_cont
	/* If not, deliver a machine check.  SRR0/1 are already set */
	li	r10, BOOK3S_INTERRUPT_MACHINE_CHECK
	li	r11, (MSR_ME << 1) | 1	/* synthesize MSR_SF | MSR_ME */
	rotldi	r11, r11, 63
	b	fast_interrupt_c_return

secondary_too_late:
	ld	r5,HSTATE_KVM_VCORE(r13)
	HMT_LOW
13:	lbz	r3,VCORE_IN_GUEST(r5)
	cmpwi	r3,0
	bne	13b
	HMT_MEDIUM
	ld	r11,PACA_SLBSHADOWPTR(r13)

	.rept	SLB_NUM_BOLTED
	ld	r5,SLBSHADOW_SAVEAREA(r11)
	ld	r6,SLBSHADOW_SAVEAREA+8(r11)
	andis.	r7,r5,SLB_ESID_V@h
	beq	1f
	slbmte	r6,r5
1:	addi	r11,r11,16
	.endr

secondary_nap:
	/* Clear our vcpu pointer so we don't come back in early */
	li	r0, 0
	std	r0, HSTATE_KVM_VCPU(r13)
	lwsync
	/* Clear any pending IPI - assume we're a secondary thread */
	ld	r5, HSTATE_XICS_PHYS(r13)
	li	r7, XICS_XIRR
	lwzcix	r3, r5, r7		/* ack any pending interrupt */
	rlwinm.	r0, r3, 0, 0xffffff	/* any pending? */
	beq	37f
	sync
	li	r0, 0xff
	li	r6, XICS_MFRR
	stbcix	r0, r5, r6		/* clear the IPI */
	stwcix	r3, r5, r7		/* EOI it */
37:	sync

	/* increment the nap count and then go to nap mode */
	ld	r4, HSTATE_KVM_VCORE(r13)
	addi	r4, r4, VCORE_NAP_COUNT
	lwsync				/* make previous updates visible */
51:	lwarx	r3, 0, r4
	addi	r3, r3, 1
	stwcx.	r3, 0, r4
	bne	51b

kvm_no_guest:
	li	r0, KVM_HWTHREAD_IN_NAP
	stb	r0, HSTATE_HWTHREAD_STATE(r13)

	li	r3, LPCR_PECE0
	mfspr	r4, SPRN_LPCR
	rlwimi	r4, r3, 0, LPCR_PECE0 | LPCR_PECE1
	mtspr	SPRN_LPCR, r4
	isync
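	/*
	 * (The rlwimi sets PECE0 and clears PECE1: with no guest to
	 * run, only an external interrupt/IPI should wake this thread,
	 * not the decrementer.)
	 */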
	std	r0, HSTATE_SCRATCH0(r13)
	ptesync
	ld	r0, HSTATE_SCRATCH0(r13)
1:	cmpd	r0, r0
	bne	1b
	nap
	b	.

/*
 * Save away FP, VMX and VSX registers.
 * r3 = vcpu pointer
 */
_GLOBAL(kvmppc_save_fp)
	mfmsr	r5
	ori	r8,r5,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
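	/*
	 * FP/VMX/VSX registers can only be accessed with the
	 * corresponding MSR facility bits enabled, so they are turned
	 * on above; the original MSR (saved in r5) is restored before
	 * returning.
	 */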
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VSRS
	STXVD2X(reg,R6,R3)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	stfd	reg,reg*8+VCPU_FPRS(r3)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif
	mffs	fr0
	stfd	fr0,VCPU_FPSCR(r3)

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r6,reg*16+VCPU_VRS
	stvx	reg,r6,r3
	reg = reg + 1
	.endr
	mfvscr	vr0
	li	r6,VCPU_VSCR
	stvx	vr0,r6,r3
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	mfspr	r6,SPRN_VRSAVE
	stw	r6,VCPU_VRSAVE(r3)
	mtmsrd	r5
	isync
	blr

/*
 * Load up FP, VMX and VSX registers
 * r4 = vcpu pointer
 */
	.globl	kvmppc_load_fp
kvmppc_load_fp:
	mfmsr	r9
	ori	r8,r9,MSR_FP
#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VEC@h
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	oris	r8,r8,MSR_VSX@h
END_FTR_SECTION_IFSET(CPU_FTR_VSX)
#endif
	mtmsrd	r8
	isync
	lfd	fr0,VCPU_FPSCR(r4)
	MTFSF_L(fr0)
#ifdef CONFIG_VSX
BEGIN_FTR_SECTION
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VSRS
	LXVD2X(reg,R7,R4)
	reg = reg + 1
	.endr
FTR_SECTION_ELSE
#endif
	reg = 0
	.rept	32
	lfd	reg,reg*8+VCPU_FPRS(r4)
	reg = reg + 1
	.endr
#ifdef CONFIG_VSX
ALT_FTR_SECTION_END_IFSET(CPU_FTR_VSX)
#endif

#ifdef CONFIG_ALTIVEC
BEGIN_FTR_SECTION
	li	r7,VCPU_VSCR
	lvx	vr0,r7,r4
	mtvscr	vr0
	reg = 0
	.rept	32
	li	r7,reg*16+VCPU_VRS
	lvx	reg,r7,r4
	reg = reg + 1
	.endr
END_FTR_SECTION_IFSET(CPU_FTR_ALTIVEC)
#endif
	lwz	r7,VCPU_VRSAVE(r4)
	mtspr	SPRN_VRSAVE,r7
	blr