/* Wrappers around pSeries hypervisor calls (plpar_hcall* and friends). */
Deepthi Dharwar212bebb2013-08-22 15:23:52 +05301#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
2#define _ASM_POWERPC_PLPAR_WRAPPERS_H
Michael Ellermana1218722005-11-03 15:33:31 +11003
Paul Gortmaker614f15b2011-07-22 18:04:33 -04004#include <linux/string.h>
Li Zhongfb912562012-10-17 21:30:13 +00005#include <linux/irqflags.h>
Paul Gortmaker614f15b2011-07-22 18:04:33 -04006
Michael Ellermana1218722005-11-03 15:33:31 +11007#include <asm/hvcall.h>
Paul Gortmaker614f15b2011-07-22 18:04:33 -04008#include <asm/paca.h>
Brian King370e4582008-08-16 05:09:33 +10009#include <asm/page.h>
Michael Ellermana1218722005-11-03 15:33:31 +110010
/*
 * Get the state of a physical CPU via smp_query_cpu_stopped().
 * The return value is one of the QCSS_* constants below.
 */
int smp_query_cpu_stopped(unsigned int pcpu);
/* Parenthesized so the negative values behave in any expression context. */
#define QCSS_STOPPED 0
#define QCSS_STOPPING 1
#define QCSS_NOT_STOPPED 2
#define QCSS_HARDWARE_ERROR (-1)
#define QCSS_HARDWARE_BUSY (-2)
Michael Ellermana1218722005-11-03 15:33:31 +110019static inline long poll_pending(void)
20{
Anton Blanchardb9377ff2006-07-19 08:01:28 +100021 return plpar_hcall_norets(H_POLL_PENDING);
Michael Ellermana1218722005-11-03 15:33:31 +110022}
23
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000024static inline u8 get_cede_latency_hint(void)
25{
Anton Blanchardcf8a0562012-04-10 16:20:54 +000026 return get_lppaca()->cede_latency_hint;
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000027}
28
29static inline void set_cede_latency_hint(u8 latency_hint)
30{
Anton Blanchardcf8a0562012-04-10 16:20:54 +000031 get_lppaca()->cede_latency_hint = latency_hint;
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000032}
33
Michael Ellermana1218722005-11-03 15:33:31 +110034static inline long cede_processor(void)
35{
Anton Blanchardb9377ff2006-07-19 08:01:28 +100036 return plpar_hcall_norets(H_CEDE);
Michael Ellermana1218722005-11-03 15:33:31 +110037}
38
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000039static inline long extended_cede_processor(unsigned long latency_hint)
40{
41 long rc;
42 u8 old_latency_hint = get_cede_latency_hint();
43
44 set_cede_latency_hint(latency_hint);
Li Zhongfb912562012-10-17 21:30:13 +000045
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000046 rc = cede_processor();
Li Zhongfb912562012-10-17 21:30:13 +000047#ifdef CONFIG_TRACE_IRQFLAGS
48 /* Ensure that H_CEDE returns with IRQs on */
49 if (WARN_ON(!(mfmsr() & MSR_EE)))
50 __hard_irq_enable();
51#endif
52
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000053 set_cede_latency_hint(old_latency_hint);
54
55 return rc;
56}
57
Michael Ellerman40765d22005-11-03 19:34:38 +110058static inline long vpa_call(unsigned long flags, unsigned long cpu,
Michael Ellermana1218722005-11-03 15:33:31 +110059 unsigned long vpa)
60{
Li Zhongbb18b3a2013-01-24 22:12:21 +000061 flags = flags << H_VPA_FUNC_SHIFT;
Michael Ellerman40765d22005-11-03 19:34:38 +110062
63 return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
Michael Ellermana1218722005-11-03 15:33:31 +110064}
65
Anton Blanchard598c8232011-07-25 01:46:34 +000066static inline long unregister_vpa(unsigned long cpu)
Michael Ellerman40765d22005-11-03 19:34:38 +110067{
Li Zhongbb18b3a2013-01-24 22:12:21 +000068 return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
Michael Ellerman40765d22005-11-03 19:34:38 +110069}
70
71static inline long register_vpa(unsigned long cpu, unsigned long vpa)
72{
Li Zhongbb18b3a2013-01-24 22:12:21 +000073 return vpa_call(H_VPA_REG_VPA, cpu, vpa);
Michael Ellerman40765d22005-11-03 19:34:38 +110074}
75
Anton Blanchard598c8232011-07-25 01:46:34 +000076static inline long unregister_slb_shadow(unsigned long cpu)
Michael Neuling2f6093c2006-08-07 16:19:19 +100077{
Li Zhongbb18b3a2013-01-24 22:12:21 +000078 return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
Michael Neuling2f6093c2006-08-07 16:19:19 +100079}
80
81static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
82{
Li Zhongbb18b3a2013-01-24 22:12:21 +000083 return vpa_call(H_VPA_REG_SLB, cpu, vpa);
Michael Neuling2f6093c2006-08-07 16:19:19 +100084}
85
Anton Blanchardb1301792011-07-25 01:46:32 +000086static inline long unregister_dtl(unsigned long cpu)
Jeremy Kerrfc59a3f2009-03-11 17:55:52 +000087{
Li Zhongbb18b3a2013-01-24 22:12:21 +000088 return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
Jeremy Kerrfc59a3f2009-03-11 17:55:52 +000089}
90
91static inline long register_dtl(unsigned long cpu, unsigned long vpa)
92{
Li Zhongbb18b3a2013-01-24 22:12:21 +000093 return vpa_call(H_VPA_REG_DTL, cpu, vpa);
Jeremy Kerrfc59a3f2009-03-11 17:55:52 +000094}
95
Brian King86630a32008-07-24 04:29:16 +100096static inline long plpar_page_set_loaned(unsigned long vpa)
97{
Brian King370e4582008-08-16 05:09:33 +100098 unsigned long cmo_page_sz = cmo_get_page_size();
99 long rc = 0;
100 int i;
101
102 for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
103 rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);
104
105 for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
106 plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
107 vpa + i - cmo_page_sz, 0);
108
109 return rc;
Brian King86630a32008-07-24 04:29:16 +1000110}
111
112static inline long plpar_page_set_active(unsigned long vpa)
113{
Brian King370e4582008-08-16 05:09:33 +1000114 unsigned long cmo_page_sz = cmo_get_page_size();
115 long rc = 0;
116 int i;
117
118 for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
119 rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);
120
121 for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
122 plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
123 vpa + i - cmo_page_sz, 0);
124
125 return rc;
Brian King86630a32008-07-24 04:29:16 +1000126}
127
Michael Ellerman40765d22005-11-03 19:34:38 +1100128extern void vpa_init(int cpu);
Michael Ellermana1218722005-11-03 15:33:31 +1100129
Anton Blanchardb9377ff2006-07-19 08:01:28 +1000130static inline long plpar_pte_enter(unsigned long flags,
131 unsigned long hpte_group, unsigned long hpte_v,
132 unsigned long hpte_r, unsigned long *slot)
133{
134 long rc;
135 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
136
137 rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
138
139 *slot = retbuf[0];
140
141 return rc;
142}
143
Michael Ellermana1218722005-11-03 15:33:31 +1100144static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
145 unsigned long avpn, unsigned long *old_pteh_ret,
146 unsigned long *old_ptel_ret)
147{
Anton Blanchardb9377ff2006-07-19 08:01:28 +1000148 long rc;
149 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
150
151 rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
152
153 *old_pteh_ret = retbuf[0];
154 *old_ptel_ret = retbuf[1];
155
156 return rc;
Michael Ellermana1218722005-11-03 15:33:31 +1100157}
158
Mohan Kumar Mb4aea362007-03-21 11:21:32 +0530159/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
160static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
161 unsigned long avpn, unsigned long *old_pteh_ret,
162 unsigned long *old_ptel_ret)
163{
164 long rc;
165 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
166
167 rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
168
169 *old_pteh_ret = retbuf[0];
170 *old_ptel_ret = retbuf[1];
171
172 return rc;
173}
174
Michael Ellermana1218722005-11-03 15:33:31 +1100175static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
176 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
177{
Anton Blanchardb9377ff2006-07-19 08:01:28 +1000178 long rc;
179 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
180
181 rc = plpar_hcall(H_READ, retbuf, flags, ptex);
182
183 *old_pteh_ret = retbuf[0];
184 *old_ptel_ret = retbuf[1];
185
186 return rc;
Michael Ellermana1218722005-11-03 15:33:31 +1100187}
188
Sachin P. Santb7abc5c2007-06-14 15:31:34 +1000189/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
190static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
191 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
192{
193 long rc;
194 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
195
196 rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
197
198 *old_pteh_ret = retbuf[0];
199 *old_ptel_ret = retbuf[1];
200
201 return rc;
202}
203
Michael Neulingf90ece22010-05-10 20:28:26 +0000204/*
Aneesh Kumar K.V4ad90c82015-12-01 09:06:59 +0530205 * ptes must be 8*sizeof(unsigned long)
206 */
207static inline long plpar_pte_read_4(unsigned long flags, unsigned long ptex,
208 unsigned long *ptes)
209
210{
211 long rc;
212 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
213
214 rc = plpar_hcall9(H_READ, retbuf, flags | H_READ_4, ptex);
215
216 memcpy(ptes, retbuf, 8*sizeof(unsigned long));
217
218 return rc;
219}
220
221/*
Michael Neulingf90ece22010-05-10 20:28:26 +0000222 * plpar_pte_read_4_raw can be called in real mode.
223 * ptes must be 8*sizeof(unsigned long)
224 */
225static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
226 unsigned long *ptes)
227
228{
229 long rc;
230 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
231
232 rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
233
234 memcpy(ptes, retbuf, 8*sizeof(unsigned long));
235
236 return rc;
237}
238
Michael Ellermana1218722005-11-03 15:33:31 +1100239static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
240 unsigned long avpn)
241{
242 return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
243}
244
245static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
246 unsigned long *tce_ret)
247{
Anton Blanchardb9377ff2006-07-19 08:01:28 +1000248 long rc;
249 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
250
251 rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
252
253 *tce_ret = retbuf[0];
254
255 return rc;
Michael Ellermana1218722005-11-03 15:33:31 +1100256}
257
258static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
259 unsigned long tceval)
260{
261 return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
262}
263
264static inline long plpar_tce_put_indirect(unsigned long liobn,
265 unsigned long ioba, unsigned long page, unsigned long count)
266{
267 return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
268}
269
270static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
271 unsigned long tceval, unsigned long count)
272{
273 return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
274}
275
Ian Munsied8f48ec2012-11-06 16:15:17 +1100276/* Set various resource mode parameters */
277static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
278 unsigned long value1, unsigned long value2)
279{
280 return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
281}
Ian Munsie798042d2012-11-08 15:57:04 +1100282
283/*
284 * Enable relocation on exceptions on this partition
285 *
286 * Note: this call has a partition wide scope and can take a while to complete.
287 * If it returns H_LONG_BUSY_* it should be retried periodically until it
288 * returns H_SUCCESS.
289 */
290static inline long enable_reloc_on_exceptions(void)
291{
292 /* mflags = 3: Exceptions at 0xC000000000004000 */
Michael Neuling60666de2014-05-29 17:45:47 +1000293 return plpar_set_mode(3, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
Ian Munsie798042d2012-11-08 15:57:04 +1100294}
295
296/*
297 * Disable relocation on exceptions on this partition
298 *
299 * Note: this call has a partition wide scope and can take a while to complete.
300 * If it returns H_LONG_BUSY_* it should be retried periodically until it
301 * returns H_SUCCESS.
302 */
303static inline long disable_reloc_on_exceptions(void) {
Michael Neuling60666de2014-05-29 17:45:47 +1000304 return plpar_set_mode(0, H_SET_MODE_RESOURCE_ADDR_TRANS_MODE, 0, 0);
Ian Munsie798042d2012-11-08 15:57:04 +1100305}
306
Anton Blancharde844b1e2013-11-20 22:14:59 +1100307/*
308 * Take exceptions in big endian mode on this partition
309 *
310 * Note: this call has a partition wide scope and can take a while to complete.
311 * If it returns H_LONG_BUSY_* it should be retried periodically until it
312 * returns H_SUCCESS.
313 */
314static inline long enable_big_endian_exceptions(void)
315{
316 /* mflags = 0: big endian exceptions */
Michael Neuling60666de2014-05-29 17:45:47 +1000317 return plpar_set_mode(0, H_SET_MODE_RESOURCE_LE, 0, 0);
Anton Blancharde844b1e2013-11-20 22:14:59 +1100318}
319
320/*
321 * Take exceptions in little endian mode on this partition
322 *
323 * Note: this call has a partition wide scope and can take a while to complete.
324 * If it returns H_LONG_BUSY_* it should be retried periodically until it
325 * returns H_SUCCESS.
326 */
327static inline long enable_little_endian_exceptions(void)
328{
329 /* mflags = 1: little endian exceptions */
Michael Neuling60666de2014-05-29 17:45:47 +1000330 return plpar_set_mode(1, H_SET_MODE_RESOURCE_LE, 0, 0);
Anton Blancharde844b1e2013-11-20 22:14:59 +1100331}
332
Ian Munsie376a8642012-12-20 14:06:41 +0000333static inline long plapr_set_ciabr(unsigned long ciabr)
334{
Michael Neuling60666de2014-05-29 17:45:47 +1000335 return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_CIABR, ciabr, 0);
Ian Munsie376a8642012-12-20 14:06:41 +0000336}
337
338static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
339{
Michael Neuling60666de2014-05-29 17:45:47 +1000340 return plpar_set_mode(0, H_SET_MODE_RESOURCE_SET_DAWR, dawr0, dawrx0);
Ian Munsie376a8642012-12-20 14:06:41 +0000341}
342
Deepthi Dharwar212bebb2013-08-22 15:23:52 +0530343#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */