/* SPDX-License-Identifier: GPL-2.0 */
#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
#define _ASM_POWERPC_PLPAR_WRAPPERS_H

#include <linux/string.h>
#include <linux/irqflags.h>

#include <asm/hvcall.h>
#include <asm/paca.h>
#include <asm/page.h>

/* Get state of physical CPU from query_cpu_stopped */
int smp_query_cpu_stopped(unsigned int pcpu);
#define QCSS_STOPPED 0
#define QCSS_STOPPING 1
#define QCSS_NOT_STOPPED 2
#define QCSS_HARDWARE_ERROR -1
#define QCSS_HARDWARE_BUSY -2
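
/*
 * Illustrative sketch, not part of the original header: one way a caller
 * might poll smp_query_cpu_stopped() until the physical CPU reaches a
 * terminal state.  The helper name and the bare cpu_relax() polling loop
 * are assumptions made for this example only.
 */
static inline int example_wait_for_cpu_stopped(unsigned int pcpu)
{
	int qcss;

	for (;;) {
		qcss = smp_query_cpu_stopped(pcpu);
		if (qcss == QCSS_STOPPED ||
		    qcss == QCSS_HARDWARE_ERROR ||
		    qcss == QCSS_HARDWARE_BUSY)
			break;			/* stopped, or hardware trouble */
		cpu_relax();			/* polite busy-wait between queries */
	}

	return qcss;
}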

static inline long poll_pending(void)
{
	return plpar_hcall_norets(H_POLL_PENDING);
}

static inline u8 get_cede_latency_hint(void)
{
	return get_lppaca()->cede_latency_hint;
}

static inline void set_cede_latency_hint(u8 latency_hint)
{
	get_lppaca()->cede_latency_hint = latency_hint;
}

static inline long cede_processor(void)
{
	return plpar_hcall_norets(H_CEDE);
}

static inline long extended_cede_processor(unsigned long latency_hint)
{
	long rc;
	u8 old_latency_hint = get_cede_latency_hint();

	set_cede_latency_hint(latency_hint);

	rc = cede_processor();
#ifdef CONFIG_TRACE_IRQFLAGS
	/* Ensure that H_CEDE returns with IRQs on */
	if (WARN_ON(!(mfmsr() & MSR_EE)))
		__hard_irq_enable();
#endif

	set_cede_latency_hint(old_latency_hint);

	return rc;
}

static inline long vpa_call(unsigned long flags, unsigned long cpu,
		unsigned long vpa)
{
	flags = flags << H_VPA_FUNC_SHIFT;

	return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
}

static inline long unregister_vpa(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
}

static inline long register_vpa(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_VPA, cpu, vpa);
}

static inline long unregister_slb_shadow(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
}

static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_SLB, cpu, vpa);
}

static inline long unregister_dtl(unsigned long cpu)
{
	return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
}

static inline long register_dtl(unsigned long cpu, unsigned long vpa)
{
	return vpa_call(H_VPA_REG_DTL, cpu, vpa);
}

extern void vpa_init(int cpu);

static inline long plpar_pte_enter(unsigned long flags,
		unsigned long hpte_group, unsigned long hpte_v,
		unsigned long hpte_r, unsigned long *slot)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);

	*slot = retbuf[0];

	return rc;
}

static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
		unsigned long avpn, unsigned long *old_pteh_ret,
		unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
		unsigned long avpn, unsigned long *old_pteh_ret,
		unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_READ, retbuf, flags, ptex);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
		unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);

	*old_pteh_ret = retbuf[0];
	*old_ptel_ret = retbuf[1];

	return rc;
}

/*
 * ptes must be 8*sizeof(unsigned long)
 */
static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
				    unsigned long *ptes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);

	memcpy(ptes, retbuf, 8*sizeof(unsigned long));

	return rc;
}
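
/*
 * Illustrative sketch, not part of the original header: plpar_pte_read_4()
 * returns four HPTEs as (v, r) word pairs, which is why the caller must
 * supply a buffer of 8 * sizeof(unsigned long).  The helper name and the
 * split into separate v/r arrays are assumptions made for this example only.
 */
static inline long example_read_hpte_quad(unsigned long ptex,
					  unsigned long v[4], unsigned long r[4])
{
	unsigned long ptes[8];		/* 4 HPTEs, two words each */
	long rc;
	int i;

	rc = plpar_pte_read_4(0, ptex, ptes);
	if (rc != H_SUCCESS)
		return rc;

	for (i = 0; i < 4; i++) {
		v[i] = ptes[2 * i];	/* first doubleword of the HPTE (v) */
		r[i] = ptes[2 * i + 1];	/* second doubleword of the HPTE (r) */
	}

	return rc;
}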

/*
 * plpar_pte_read_4_raw can be called in real mode.
 * ptes must be 8*sizeof(unsigned long)
 */
static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
					unsigned long *ptes)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];

	rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);

	memcpy(ptes, retbuf, 8*sizeof(unsigned long));

	return rc;
}

static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
		unsigned long avpn)
{
	return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
}

static inline long plpar_resize_hpt_prepare(unsigned long flags,
					    unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_PREPARE, flags, shift);
}

static inline long plpar_resize_hpt_commit(unsigned long flags,
					   unsigned long shift)
{
	return plpar_hcall_norets(H_RESIZE_HPT_COMMIT, flags, shift);
}

static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
		unsigned long *tce_ret)
{
	long rc;
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];

	rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);

	*tce_ret = retbuf[0];

	return rc;
}

static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
		unsigned long tceval)
{
	return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
}

static inline long plpar_tce_put_indirect(unsigned long liobn,
		unsigned long ioba, unsigned long page, unsigned long count)
{
	return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
}

static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
		unsigned long tceval, unsigned long count)
{
	return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
}

/* Set various resource mode parameters */
static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
		unsigned long value1, unsigned long value2)
{
	return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
}

/*
 * Enable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_reloc_on_exceptions(void)
{
	/* mflags = 3: Exceptions at 0xC000000000004000 */
	return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}
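
/*
 * Illustrative sketch, not part of the original header: a caller honouring
 * the retry note above.  It assumes the H_IS_LONG_BUSY()/get_longbusy_msecs()
 * helpers that accompany the H_LONG_BUSY_* return codes, mdelay() from
 * <linux/delay.h>, and an arbitrary one second cap chosen for this example.
 */
static inline long example_enable_reloc_on_exceptions_sync(void)
{
	unsigned int delay, total_delay = 0;
	long rc;

	for (;;) {
		rc = enable_reloc_on_exceptions();
		if (!H_IS_LONG_BUSY(rc))
			return rc;	/* H_SUCCESS or a real error */

		delay = get_longbusy_msecs(rc);
		total_delay += delay;
		if (total_delay > 1000)
			return rc;	/* still busy after ~1s, give up */

		mdelay(delay);
	}
}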

/*
 * Disable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long disable_reloc_on_exceptions(void)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
}

/*
 * Take exceptions in big endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_big_endian_exceptions(void)
{
	/* mflags = 0: big endian exceptions */
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
}

/*
 * Take exceptions in little endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_little_endian_exceptions(void)
{
	/* mflags = 1: little endian exceptions */
	return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
}

static inline long plapr_set_ciabr(unsigned long ciabr)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
}

static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
	return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
}

static inline long plapr_signal_sys_reset(long cpu)
{
	return plpar_hcall_norets(H_SIGNAL_SYS_RESET, cpu);
}

static inline long plpar_get_cpu_characteristics(struct h_cpu_char_result *p)
{
	unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
	long rc;

	rc = plpar_hcall(H_GET_CPU_CHARACTERISTICS, retbuf);
	if (rc == H_SUCCESS) {
		p->character = retbuf[0];
		p->behaviour = retbuf[1];
	}

	return rc;
}
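
/*
 * Illustrative sketch, not part of the original header: a caller probing the
 * CPU characteristics once and testing one behaviour bit.  The helper name is
 * hypothetical; H_CPU_BEHAV_FAVOUR_SECURITY is among the bit definitions that
 * accompany struct h_cpu_char_result in <asm/hvcall.h>.
 */
static inline int example_firmware_favours_security(void)
{
	struct h_cpu_char_result result;

	if (plpar_get_cpu_characteristics(&result) != H_SUCCESS)
		return 0;	/* hcall failed or unsupported: assume nothing */

	return !!(result.behaviour & H_CPU_BEHAV_FAVOUR_SECURITY);
}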

#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */