/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
#define _ASM_POWERPC_PLPAR_WRAPPERS_H

#ifdef CONFIG_PPC_PSERIES

#include <linux/string.h>
#include <linux/irqflags.h>

#include <asm/hvcall.h>
#include <asm/paca.h>
#include <asm/page.h>

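/* Check with the hypervisor for any pending work for this virtual processor. */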
static inline long poll_pending(void)
{
	return plpar_hcall_norets(H_POLL_PENDING);
}

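/*
 * The cede latency hint is a field in this CPU's lppaca that qualifies a
 * subsequent H_CEDE: it tells the hypervisor how latency-sensitive the OS
 * is about getting the processor back.
 */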
static inline u8 get_cede_latency_hint(void)
{
	return get_lppaca()->cede_latency_hint;
}

static inline void set_cede_latency_hint(u8 latency_hint)
{
	get_lppaca()->cede_latency_hint = latency_hint;
}

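/*
 * Cede this virtual processor to the hypervisor; it is handed back when an
 * interrupt is pending or another CPU prods it (H_PROD).
 */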
static inline long cede_processor(void)
{
	return plpar_hcall_norets(H_CEDE);
}

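/*
 * Cede with a caller-supplied latency hint in place; the previous hint is
 * restored once the hypervisor gives the processor back.
 */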
static inline long extended_cede_processor(unsigned long latency_hint)
{
	long rc;
	u8 old_latency_hint = get_cede_latency_hint();

	set_cede_latency_hint(latency_hint);

	rc = cede_processor();
#ifdef CONFIG_PPC_IRQ_SOFT_MASK_DEBUG
	/* Ensure that H_CEDE returns with IRQs on */
	if (WARN_ON(!(mfmsr() & MSR_EE)))
		__hard_irq_enable();
#endif

	set_cede_latency_hint(old_latency_hint);

	return rc;
}

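/*
 * All of the virtual processor area registrations below go through
 * H_REGISTER_VPA; the sub-function (register or deregister the VPA, SLB
 * shadow buffer or dispatch trace log) is encoded in the flags argument.
 */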
static inline long vpa_call(unsigned long flags, unsigned long cpu,
		unsigned long vpa)
{
	flags = flags << H_VPA_FUNC_SHIFT;

	return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}

static inline long unregister_vpa(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
}

static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_VPA, cpu, vpa);
}

static inline long unregister_slb_shadow(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
}

static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_SLB, cpu, vpa);
}

static inline long unregister_dtl(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
}

static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}

extern void vpa_init(int cpu);

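/*
 * H_ENTER inserts an HPTE into the given hash page table group; the slot
 * the hypervisor chose is returned through *slot.
 */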
static inline long plpar_pte_enter(unsigned long flags,
		unsigned long hpte_group, unsigned long hpte_v,
		unsigned long hpte_r, unsigned long *slot)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);

	*slot = retbuf[0];

	return rc;
}

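/*
 * H_REMOVE invalidates an HPTE (optionally matched against avpn) and hands
 * back the old first and second doublewords of the entry.
 */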
static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
		unsigned long avpn, unsigned long *old_pteh_ret,
		unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
		unsigned long avpn, unsigned long *old_pteh_ret,
		unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

137
Michael Ellermana1218722005-11-03 15:33:31 +1100138static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
139 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
140{
Anton Blanchardb9377ff2006-07-19 08:01:28 +1000141 long rc;
142 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
143
144 rc = plpar_hcall(H_READ, retbuf, flags, ptex);
145
146 *old_pteh_ret = retbuf[0];
147 *old_ptel_ret = retbuf[1];
148
149 return rc;
Michael Ellermana1218722005-11-03 15:33:31 +1100150}
151
/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

/*
 * Read four consecutive HPTEs: the ptes buffer must hold 8 unsigned longs
 * (two doublewords per HPTE).
 */
static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
		unsigned long *ptes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);

	memcpy(ptes, retbuf, 8*sizeof(unsigned long));

	return rc;
}

/*
 * plpar_pte_read_4_raw can be called in real mode.
 * Read four consecutive HPTEs: the ptes buffer must hold 8 unsigned longs
 * (two doublewords per HPTE).
 */
static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
		unsigned long *ptes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);

	memcpy(ptes, retbuf, 8*sizeof(unsigned long));

	return rc;
}

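/* H_PROTECT updates the page protection bits of an existing HPTE. */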
static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
		unsigned long avpn)
{
	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
}

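/*
 * Two-phase hash page table resize: PREPARE asks the hypervisor to allocate
 * a new HPT of the requested shift (log2 of the new table size), COMMIT
 * switches the partition over to it.
 */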
static inline long plpar_resize_hpt_prepare(unsigned long flags,
		unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
}

static inline long plpar_resize_hpt_commit(unsigned long flags,
		unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
}

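/*
 * TCE (Translation Control Entry) management for I/O translation tables:
 * liobn names the logical I/O bus, ioba is the I/O-side address being
 * mapped, and tceval is the TCE to store there.
 */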
static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
		unsigned long *tce_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);

	*tce_ret = retbuf[0];

	return rc;
}

static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
		unsigned long tceval)
{
	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
}

static inline long plpar_tce_put_indirect(unsigned long liobn,
		unsigned long ioba, unsigned long page, unsigned long count)
{
	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
}

static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
		unsigned long tceval, unsigned long count)
{
	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
}

/* Set various resource mode parameters */
static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
		unsigned long value1, unsigned long value2)
{
	return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
}

/*
 * Enable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_reloc_on_exceptions(void)
{
	/* mflags = 3: Exceptions at 0xC000000000004000 */
	return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}

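/*
 * The partition-scope H_SET_MODE wrappers above and below can return
 * H_LONG_BUSY_*.  A minimal caller sketch (an assumption of this note, not
 * taken from any particular caller; it uses the H_IS_LONG_BUSY() helper from
 * hvcall.h and mdelay()) might look like:
 *
 *	long rc;
 *
 *	do {
 *		rc = enable_reloc_on_exceptions();
 *		if (H_IS_LONG_BUSY(rc))
 *			mdelay(1);
 *	} while (H_IS_LONG_BUSY(rc));
 */
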
/*
 * Disable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long disable_reloc_on_exceptions(void)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}

/*
 * Take exceptions in big endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_big_endian_exceptions(void)
{
	/* mflags = 0: big endian exceptions */
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
}

/*
 * Take exceptions in little endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_little_endian_exceptions(void)
{
	/* mflags = 1: little endian exceptions */
	return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
}

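/*
 * The instruction breakpoint (CIABR) and data watchpoint (DAWR/DAWRX)
 * registers are also set through H_SET_MODE on pseries.
 */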
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
}

static inline long plpar_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
}

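/*
 * Send a system reset exception to the given CPU; hvcall.h also defines
 * special cpu values for broadcasting to all, or all other, CPUs.
 */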
static inline long plpar_signal_sys_reset(long cpu)
{
	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
}

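/*
 * H_GET_CPU_CHARACTERISTICS reports the CPU security characteristics and
 * recommended behaviour flags used to configure the Spectre/Meltdown
 * mitigations.
 */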
static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
	if (rc == H_SUCCESS) {
		p->character = retbuf[0];
		p->behaviour = retbuf[1];
	}

	return rc;
}

#else /* !CONFIG_PPC_PSERIES */

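/* Stub so callers don't need to be guarded by CONFIG_PPC_PSERIES. */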
static inline long plpar_set_ciabr(unsigned long ciabr)
{
	return 0;
}
#endif /* CONFIG_PPC_PSERIES */

#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */