/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * Derived from book3s_hv_rmhandlers.S, which is:
 *
 * Copyright 2011 Paul Mackerras, IBM Corp. <paulus@au1.ibm.com>
 *
 */

#include <asm/reg.h>
#include <asm/ppc_asm.h>
#include <asm/asm-offsets.h>
#include <asm/export.h>
#include <asm/tm.h>
#include <asm/cputable.h>

#ifdef CONFIG_PPC_TRANSACTIONAL_MEM
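/* Offset of the checkpointed GPR 'reg' within the vcpu struct. */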
#define VCPU_GPRS_TM(reg)	(((reg) * ULONG_SIZE) + VCPU_GPR_TM)

/*
 * Save transactional state and TM-related registers.
 * Called with r9 pointing to the vcpu struct.
 * This can modify all checkpointed registers, but
 * restores r1, r2 and r9 (vcpu pointer) before exit.
 */
_GLOBAL(kvmppc_save_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM. */
	mfmsr	r8
	li	r0, 1
	rldimi	r8, r0, MSR_TM_LG, 63-MSR_TM_LG
	mtmsrd	r8

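	/*
	 * MSR[TS] is a 2-bit field: 0b10 means transactional, 0b01 means
	 * suspended.  If it is zero the guest has no transactional state,
	 * so there is nothing to reclaim.
	 */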
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ld	r5, VCPU_MSR(r9)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beq	1f	/* TM not active in guest. */
#endif

	std	r1, HSTATE_HOST_R1(r13)
	li	r3, TM_CAUSE_KVM_RESCHED

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
BEGIN_FTR_SECTION
	/* Emulation of the treclaim instruction needs TEXASR before treclaim */
	mfspr	r6, SPRN_TEXASR
	std	r6, VCPU_ORIG_TEXASR(r9)
END_FTR_SECTION_IFSET(CPU_FTR_P9_TM_HV_ASSIST)
#endif

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

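	/*
	 * treclaim aborts the transaction and copies the checkpointed
	 * register values into the thread's live registers, recording the
	 * failure cause passed in r3 into TEXASR.
	 */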
	/* All GPRs are volatile at this point. */
	TRECLAIM(R3)

	/* Temporarily store r13 and r9 so we have some regs to play with */
	SET_SCRATCH0(r13)
	GET_PACA(r13)
	std	r9, PACATMSCRATCH(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ld	r9, HSTATE_KVM_VCPU(r13)
#endif

	/* Get a few more GPRs free. */
	std	r29, VCPU_GPRS_TM(29)(r9)
	std	r30, VCPU_GPRS_TM(30)(r9)
	std	r31, VCPU_GPRS_TM(31)(r9)

	/* Save away PPR and DSCR soon so we don't run with user values. */
	mfspr	r31, SPRN_PPR
	HMT_MEDIUM
	mfspr	r30, SPRN_DSCR
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
#endif

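	/*
	 * This .rept block expands at assembly time into one std for each
	 * of r0-r28, except r9 and r13, which still hold the vcpu and PACA
	 * pointers; their checkpointed values were stashed in the scratch
	 * slots above and are saved just below.
	 */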
	/* Save all but r9, r13 & r29-r31 */
	reg = 0
	.rept	29
	.if (reg != 9) && (reg != 13)
	std	reg, VCPU_GPRS_TM(reg)(r9)
	.endif
	reg = reg + 1
	.endr
	/* ... now save r13 */
	GET_SCRATCH0(r4)
	std	r4, VCPU_GPRS_TM(13)(r9)
	/* ... and save r9 */
	ld	r4, PACATMSCRATCH(r13)
	std	r4, VCPU_GPRS_TM(9)(r9)

	/* Reload stack pointer and TOC. */
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATOC(r13)

	/* Set MSR RI now that we have r1 and r13 back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1

	/* Save away checkpointed SPRs. */
	std	r31, VCPU_PPR_TM(r9)
	std	r30, VCPU_DSCR_TM(r9)
	mflr	r5
	mfcr	r6
	mfctr	r7
	mfspr	r8, SPRN_AMR
	mfspr	r10, SPRN_TAR
	mfxer	r11
	std	r5, VCPU_LR_TM(r9)
	stw	r6, VCPU_CR_TM(r9)
	std	r7, VCPU_CTR_TM(r9)
	std	r8, VCPU_AMR_TM(r9)
	std	r10, VCPU_TAR_TM(r9)
	std	r11, VCPU_XER_TM(r9)

	/* Restore r12 as trap number. */
	lwz	r12, VCPU_TRAP(r9)

	/* Save FP/VSX. */
	addi	r3, r9, VCPU_FPRS_TM
	bl	store_fp_state
	addi	r3, r9, VCPU_VRS_TM
	bl	store_vr_state
	mfspr	r6, SPRN_VRSAVE
	stw	r6, VCPU_VRSAVE_TM(r9)
1:
	/*
	 * We need to save these SPRs after the treclaim so that the software
	 * error code is recorded correctly in the TEXASR.  Also the user may
	 * change these outside of a transaction, so they must always be
	 * context switched.
	 */
	mfspr	r7, SPRN_TEXASR
	std	r7, VCPU_TEXASR(r9)
11:
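	/*
	 * TFHAR holds the address at which the transaction's failure
	 * handler resumes (the instruction following tbegin.), and TFIAR
	 * records the address at which the failure was raised.
	 */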
	mfspr	r5, SPRN_TFHAR
	mfspr	r6, SPRN_TFIAR
	std	r5, VCPU_TFHAR(r9)
	std	r6, VCPU_TFIAR(r9)

	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr

/*
 * Restore transactional state and TM-related registers.
 * Called with r4 pointing to the vcpu struct.
 * This potentially modifies all checkpointed registers.
 * It restores r1, r2, r4 from the PACA.
 */
_GLOBAL(kvmppc_restore_tm)
	mflr	r0
	std	r0, PPC_LR_STKOFF(r1)

	/* Turn on TM/FP/VSX/VMX so we can restore them. */
	mfmsr	r5
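	/*
	 * MSR_TM is bit 32 of the MSR, out of reach of ori/oris, so the
	 * mask is built with li + sldi instead.
	 */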
	li	r6, MSR_TM >> 32
	sldi	r6, r6, 32
	or	r5, r5, r6
	ori	r5, r5, MSR_FP
	oris	r5, r5, (MSR_VEC | MSR_VSX)@h
	mtmsrd	r5

	/*
	 * The user may change these outside of a transaction, so they must
	 * always be context switched.
	 */
	ld	r5, VCPU_TFHAR(r4)
	ld	r6, VCPU_TFIAR(r4)
	ld	r7, VCPU_TEXASR(r4)
	mtspr	SPRN_TFHAR, r5
	mtspr	SPRN_TFIAR, r6
	mtspr	SPRN_TEXASR, r7

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ld	r5, VCPU_MSR(r4)
	rldicl.	r5, r5, 64 - MSR_TS_S_LG, 62
	beqlr		/* TM not active in guest */
#endif
	std	r1, HSTATE_HOST_R1(r13)

	/*
	 * Make sure the failure summary is set, otherwise we'll program
	 * check when we trechkpt.  It's possible that this might not have
	 * been set on a kvmppc_set_one_reg() call but we shouldn't let
	 * this crash the host.
	 */
	oris	r7, r7, (TEXASR_FS)@h
	mtspr	SPRN_TEXASR, r7

	/*
	 * We need to load up the checkpointed state for the guest.
	 * We need to do this early as it will blow away any GPRs, VSRs and
	 * some SPRs.
	 */

	mr	r31, r4
	addi	r3, r31, VCPU_FPRS_TM
	bl	load_fp_state
	addi	r3, r31, VCPU_VRS_TM
	bl	load_vr_state
	mr	r4, r31
	lwz	r7, VCPU_VRSAVE_TM(r4)
	mtspr	SPRN_VRSAVE, r7

	ld	r5, VCPU_LR_TM(r4)
	lwz	r6, VCPU_CR_TM(r4)
	ld	r7, VCPU_CTR_TM(r4)
	ld	r8, VCPU_AMR_TM(r4)
	ld	r9, VCPU_TAR_TM(r4)
	ld	r10, VCPU_XER_TM(r4)
	mtlr	r5
	mtcr	r6
	mtctr	r7
	mtspr	SPRN_AMR, r8
	mtspr	SPRN_TAR, r9
	mtxer	r10

	/*
	 * Load up PPR and DSCR values but don't put them in the actual SPRs
	 * till the last moment to avoid running with userspace PPR and DSCR for
	 * too long.
	 */
	ld	r29, VCPU_DSCR_TM(r4)
	ld	r30, VCPU_PPR_TM(r4)

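	/*
	 * r2 (the kernel TOC pointer) is about to be clobbered by the
	 * checkpointed GPR loads and trechkpt, so park it in the PACA
	 * until the live registers are recovered below.
	 */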
	std	r2, PACATMSCRATCH(r13) /* Save TOC */

	/* Clear the MSR RI since r1, r13 are all going to be foobar. */
	li	r5, 0
	mtmsrd	r5, 1

	/* Load GPRs r0-r28 */
	reg = 0
	.rept	29
	ld	reg, VCPU_GPRS_TM(reg)(r31)
	reg = reg + 1
	.endr

	mtspr	SPRN_DSCR, r29
	mtspr	SPRN_PPR, r30

	/* Load final GPRs */
	ld	r29, VCPU_GPRS_TM(29)(r31)
	ld	r30, VCPU_GPRS_TM(30)(r31)
	ld	r31, VCPU_GPRS_TM(31)(r31)

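	/*
	 * trechkpt copies the thread's current register state into the
	 * checkpointed state; it would raise a TM Bad Thing program check
	 * if TEXASR[FS] were clear, which was ruled out above.
	 */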
	/* TM checkpointed state is now set up.  All GPRs are now volatile. */
	TRECHKPT

	/* Now let's get back the state we need. */
	HMT_MEDIUM
	GET_PACA(r13)
#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
	ld	r29, HSTATE_DSCR(r13)
	mtspr	SPRN_DSCR, r29
	ld	r4, HSTATE_KVM_VCPU(r13)
#endif
	ld	r1, HSTATE_HOST_R1(r13)
	ld	r2, PACATMSCRATCH(r13)

	/* Set the MSR RI since we have our registers back. */
	li	r5, MSR_RI
	mtmsrd	r5, 1
	ld	r0, PPC_LR_STKOFF(r1)
	mtlr	r0
	blr
#endif /* CONFIG_PPC_TRANSACTIONAL_MEM */