blob: 12c32c5f533d924428714301f7482493df00e3c8 [file] [log] [blame]
Deepthi Dharwar212bebb2013-08-22 15:23:52 +05301#ifndef _ASM_POWERPC_PLPAR_WRAPPERS_H
2#define _ASM_POWERPC_PLPAR_WRAPPERS_H
Michael Ellermana1218722005-11-03 15:33:31 +11003
Paul Gortmaker614f15b2011-07-22 18:04:33 -04004#include <linux/string.h>
Li Zhongfb912562012-10-17 21:30:13 +00005#include <linux/irqflags.h>
Paul Gortmaker614f15b2011-07-22 18:04:33 -04006
Michael Ellermana1218722005-11-03 15:33:31 +11007#include <asm/hvcall.h>
Paul Gortmaker614f15b2011-07-22 18:04:33 -04008#include <asm/paca.h>
Brian King370e4582008-08-16 05:09:33 +10009#include <asm/page.h>
Michael Ellermana1218722005-11-03 15:33:31 +110010
/*
 * Get state of physical CPU from query_cpu_stopped.
 * Returns one of the QCSS_* values below.
 */
int smp_query_cpu_stopped(unsigned int pcpu);
/* Possible return values of smp_query_cpu_stopped() */
#define QCSS_STOPPED 0		/* CPU is stopped */
#define QCSS_STOPPING 1		/* CPU is in the process of stopping */
#define QCSS_NOT_STOPPED 2	/* CPU is not stopped (still running) */
#define QCSS_HARDWARE_ERROR -1	/* query reported a hardware error */
#define QCSS_HARDWARE_BUSY -2	/* hardware busy */
18
Michael Ellermana1218722005-11-03 15:33:31 +110019static inline long poll_pending(void)
20{
Anton Blanchardb9377ff2006-07-19 08:01:28 +100021 return plpar_hcall_norets(H_POLL_PENDING);
Michael Ellermana1218722005-11-03 15:33:31 +110022}
23
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000024static inline u8 get_cede_latency_hint(void)
25{
Anton Blanchardcf8a0562012-04-10 16:20:54 +000026 return get_lppaca()->cede_latency_hint;
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000027}
28
29static inline void set_cede_latency_hint(u8 latency_hint)
30{
Anton Blanchardcf8a0562012-04-10 16:20:54 +000031 get_lppaca()->cede_latency_hint = latency_hint;
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000032}
33
Michael Ellermana1218722005-11-03 15:33:31 +110034static inline long cede_processor(void)
35{
Anton Blanchardb9377ff2006-07-19 08:01:28 +100036 return plpar_hcall_norets(H_CEDE);
Michael Ellermana1218722005-11-03 15:33:31 +110037}
38
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000039static inline long extended_cede_processor(unsigned long latency_hint)
40{
41 long rc;
42 u8 old_latency_hint = get_cede_latency_hint();
43
44 set_cede_latency_hint(latency_hint);
Li Zhongfb912562012-10-17 21:30:13 +000045
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000046 rc = cede_processor();
Li Zhongfb912562012-10-17 21:30:13 +000047#ifdef CONFIG_TRACE_IRQFLAGS
48 /* Ensure that H_CEDE returns with IRQs on */
49 if (WARN_ON(!(mfmsr() & MSR_EE)))
50 __hard_irq_enable();
51#endif
52
Gautham R Shenoy69ddb572009-10-29 19:22:48 +000053 set_cede_latency_hint(old_latency_hint);
54
55 return rc;
56}
57
Michael Ellerman40765d22005-11-03 19:34:38 +110058static inline long vpa_call(unsigned long flags, unsigned long cpu,
Michael Ellermana1218722005-11-03 15:33:31 +110059 unsigned long vpa)
60{
Li Zhongbb18b3a2013-01-24 22:12:21 +000061 flags = flags << H_VPA_FUNC_SHIFT;
Michael Ellerman40765d22005-11-03 19:34:38 +110062
63 return plpar_hcall_norets(H_REGISTER_VPA, flags, cpu, vpa);
Michael Ellermana1218722005-11-03 15:33:31 +110064}
65
Anton Blanchard598c8232011-07-25 01:46:34 +000066static inline long unregister_vpa(unsigned long cpu)
Michael Ellerman40765d22005-11-03 19:34:38 +110067{
Li Zhongbb18b3a2013-01-24 22:12:21 +000068 return vpa_call(H_VPA_DEREG_VPA, cpu, 0);
Michael Ellerman40765d22005-11-03 19:34:38 +110069}
70
71static inline long register_vpa(unsigned long cpu, unsigned long vpa)
72{
Li Zhongbb18b3a2013-01-24 22:12:21 +000073 return vpa_call(H_VPA_REG_VPA, cpu, vpa);
Michael Ellerman40765d22005-11-03 19:34:38 +110074}
75
Anton Blanchard598c8232011-07-25 01:46:34 +000076static inline long unregister_slb_shadow(unsigned long cpu)
Michael Neuling2f6093c2006-08-07 16:19:19 +100077{
Li Zhongbb18b3a2013-01-24 22:12:21 +000078 return vpa_call(H_VPA_DEREG_SLB, cpu, 0);
Michael Neuling2f6093c2006-08-07 16:19:19 +100079}
80
81static inline long register_slb_shadow(unsigned long cpu, unsigned long vpa)
82{
Li Zhongbb18b3a2013-01-24 22:12:21 +000083 return vpa_call(H_VPA_REG_SLB, cpu, vpa);
Michael Neuling2f6093c2006-08-07 16:19:19 +100084}
85
Anton Blanchardb1301792011-07-25 01:46:32 +000086static inline long unregister_dtl(unsigned long cpu)
Jeremy Kerrfc59a3f2009-03-11 17:55:52 +000087{
Li Zhongbb18b3a2013-01-24 22:12:21 +000088 return vpa_call(H_VPA_DEREG_DTL, cpu, 0);
Jeremy Kerrfc59a3f2009-03-11 17:55:52 +000089}
90
91static inline long register_dtl(unsigned long cpu, unsigned long vpa)
92{
Li Zhongbb18b3a2013-01-24 22:12:21 +000093 return vpa_call(H_VPA_REG_DTL, cpu, vpa);
Jeremy Kerrfc59a3f2009-03-11 17:55:52 +000094}
95
/*
 * Mark the kernel page at @vpa as loaned to the hypervisor, one firmware
 * (CMO) page at a time.  A kernel PAGE_SIZE page may span several CMO pages
 * of cmo_get_page_size() bytes each.
 *
 * Returns 0 on success or the failing hcall's return code; on failure any
 * CMO pages already set loaned are flipped back to active so the kernel
 * page is left in a consistent state.
 */
static inline long plpar_page_set_loaned(unsigned long vpa)
{
	unsigned long cmo_page_sz = cmo_get_page_size();
	long rc = 0;
	int i;

	/* Loan each CMO-sized subpage; stop at the first failure. */
	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED, vpa + i, 0);

	/*
	 * Unwind on failure.  The first loop exits with i one step past the
	 * failing subpage, so after the initial "i -= cmo_page_sz" the body
	 * (which targets vpa + i - cmo_page_sz) walks back over exactly the
	 * subpages that succeeded, re-activating each one.
	 */
	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE,
				   vpa + i - cmo_page_sz, 0);

	return rc;
}
111
/*
 * Mark the kernel page at @vpa as active (reclaimed from loan), one firmware
 * (CMO) page at a time — the mirror image of plpar_page_set_loaned().
 *
 * Returns 0 on success or the failing hcall's return code; on failure any
 * CMO pages already set active are flipped back to loaned.
 */
static inline long plpar_page_set_active(unsigned long vpa)
{
	unsigned long cmo_page_sz = cmo_get_page_size();
	long rc = 0;
	int i;

	/* Activate each CMO-sized subpage; stop at the first failure. */
	for (i = 0; !rc && i < PAGE_SIZE; i += cmo_page_sz)
		rc = plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_ACTIVE, vpa + i, 0);

	/*
	 * Unwind on failure: i is one step past the failing subpage, so this
	 * walks back over the subpages that succeeded, re-loaning each one.
	 */
	for (i -= cmo_page_sz; rc && i != 0; i -= cmo_page_sz)
		plpar_hcall_norets(H_PAGE_INIT, H_PAGE_SET_LOANED,
				   vpa + i - cmo_page_sz, 0);

	return rc;
}
127
Michael Ellerman40765d22005-11-03 19:34:38 +1100128extern void vpa_init(int cpu);
Michael Ellermana1218722005-11-03 15:33:31 +1100129
Anton Blanchardb9377ff2006-07-19 08:01:28 +1000130static inline long plpar_pte_enter(unsigned long flags,
131 unsigned long hpte_group, unsigned long hpte_v,
132 unsigned long hpte_r, unsigned long *slot)
133{
134 long rc;
135 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
136
137 rc = plpar_hcall(H_ENTER, retbuf, flags, hpte_group, hpte_v, hpte_r);
138
139 *slot = retbuf[0];
140
141 return rc;
142}
143
Michael Ellermana1218722005-11-03 15:33:31 +1100144static inline long plpar_pte_remove(unsigned long flags, unsigned long ptex,
145 unsigned long avpn, unsigned long *old_pteh_ret,
146 unsigned long *old_ptel_ret)
147{
Anton Blanchardb9377ff2006-07-19 08:01:28 +1000148 long rc;
149 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
150
151 rc = plpar_hcall(H_REMOVE, retbuf, flags, ptex, avpn);
152
153 *old_pteh_ret = retbuf[0];
154 *old_ptel_ret = retbuf[1];
155
156 return rc;
Michael Ellermana1218722005-11-03 15:33:31 +1100157}
158
Mohan Kumar Mb4aea362007-03-21 11:21:32 +0530159/* plpar_pte_remove_raw can be called in real mode. It calls plpar_hcall_raw */
160static inline long plpar_pte_remove_raw(unsigned long flags, unsigned long ptex,
161 unsigned long avpn, unsigned long *old_pteh_ret,
162 unsigned long *old_ptel_ret)
163{
164 long rc;
165 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
166
167 rc = plpar_hcall_raw(H_REMOVE, retbuf, flags, ptex, avpn);
168
169 *old_pteh_ret = retbuf[0];
170 *old_ptel_ret = retbuf[1];
171
172 return rc;
173}
174
Michael Ellermana1218722005-11-03 15:33:31 +1100175static inline long plpar_pte_read(unsigned long flags, unsigned long ptex,
176 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
177{
Anton Blanchardb9377ff2006-07-19 08:01:28 +1000178 long rc;
179 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
180
181 rc = plpar_hcall(H_READ, retbuf, flags, ptex);
182
183 *old_pteh_ret = retbuf[0];
184 *old_ptel_ret = retbuf[1];
185
186 return rc;
Michael Ellermana1218722005-11-03 15:33:31 +1100187}
188
Sachin P. Santb7abc5c2007-06-14 15:31:34 +1000189/* plpar_pte_read_raw can be called in real mode. It calls plpar_hcall_raw */
190static inline long plpar_pte_read_raw(unsigned long flags, unsigned long ptex,
191 unsigned long *old_pteh_ret, unsigned long *old_ptel_ret)
192{
193 long rc;
194 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
195
196 rc = plpar_hcall_raw(H_READ, retbuf, flags, ptex);
197
198 *old_pteh_ret = retbuf[0];
199 *old_ptel_ret = retbuf[1];
200
201 return rc;
202}
203
Michael Neulingf90ece22010-05-10 20:28:26 +0000204/*
205 * plpar_pte_read_4_raw can be called in real mode.
206 * ptes must be 8*sizeof(unsigned long)
207 */
208static inline long plpar_pte_read_4_raw(unsigned long flags, unsigned long ptex,
209 unsigned long *ptes)
210
211{
212 long rc;
213 unsigned long retbuf[PLPAR_HCALL9_BUFSIZE];
214
215 rc = plpar_hcall9_raw(H_READ, retbuf, flags | H_READ_4, ptex);
216
217 memcpy(ptes, retbuf, 8*sizeof(unsigned long));
218
219 return rc;
220}
221
Michael Ellermana1218722005-11-03 15:33:31 +1100222static inline long plpar_pte_protect(unsigned long flags, unsigned long ptex,
223 unsigned long avpn)
224{
225 return plpar_hcall_norets(H_PROTECT, flags, ptex, avpn);
226}
227
228static inline long plpar_tce_get(unsigned long liobn, unsigned long ioba,
229 unsigned long *tce_ret)
230{
Anton Blanchardb9377ff2006-07-19 08:01:28 +1000231 long rc;
232 unsigned long retbuf[PLPAR_HCALL_BUFSIZE];
233
234 rc = plpar_hcall(H_GET_TCE, retbuf, liobn, ioba);
235
236 *tce_ret = retbuf[0];
237
238 return rc;
Michael Ellermana1218722005-11-03 15:33:31 +1100239}
240
241static inline long plpar_tce_put(unsigned long liobn, unsigned long ioba,
242 unsigned long tceval)
243{
244 return plpar_hcall_norets(H_PUT_TCE, liobn, ioba, tceval);
245}
246
247static inline long plpar_tce_put_indirect(unsigned long liobn,
248 unsigned long ioba, unsigned long page, unsigned long count)
249{
250 return plpar_hcall_norets(H_PUT_TCE_INDIRECT, liobn, ioba, page, count);
251}
252
253static inline long plpar_tce_stuff(unsigned long liobn, unsigned long ioba,
254 unsigned long tceval, unsigned long count)
255{
256 return plpar_hcall_norets(H_STUFF_TCE, liobn, ioba, tceval, count);
257}
258
Ian Munsied8f48ec2012-11-06 16:15:17 +1100259/* Set various resource mode parameters */
260static inline long plpar_set_mode(unsigned long mflags, unsigned long resource,
261 unsigned long value1, unsigned long value2)
262{
263 return plpar_hcall_norets(H_SET_MODE, mflags, resource, value1, value2);
264}
Ian Munsie798042d2012-11-08 15:57:04 +1100265
/*
 * Enable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_reloc_on_exceptions(void)
{
	long rc;

	/* mflags = 3: Exceptions at 0xC000000000004000 */
	rc = plpar_set_mode(3, 3, 0, 0);
	return rc;
}
278
/*
 * Disable relocation on exceptions on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long disable_reloc_on_exceptions(void)
{
	/* mflags = 0, resource = 3: same resource as enable_reloc_on_exceptions() */
	return plpar_set_mode(0, 3, 0, 0);
}
289
/*
 * Take exceptions in big endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_big_endian_exceptions(void)
{
	long rc;

	/* mflags = 0: big endian exceptions */
	rc = plpar_set_mode(0, 4, 0, 0);
	return rc;
}
302
/*
 * Take exceptions in little endian mode on this partition
 *
 * Note: this call has a partition wide scope and can take a while to complete.
 * If it returns H_LONG_BUSY_* it should be retried periodically until it
 * returns H_SUCCESS.
 */
static inline long enable_little_endian_exceptions(void)
{
	long rc;

	/* mflags = 1: little endian exceptions */
	rc = plpar_set_mode(1, 4, 0, 0);
	return rc;
}
315
/*
 * Set the CIABR via H_SET_MODE (resource = 1).
 * NOTE(review): "plapr" looks like a typo for "plpar", but the name is the
 * external interface and is kept for compatibility with existing callers.
 */
static inline long plapr_set_ciabr(unsigned long ciabr)
{
	long rc;

	rc = plpar_set_mode(0, 1, ciabr, 0);
	return rc;
}
320
/*
 * Set watchpoint 0 (DAWR0/DAWRX0 values) via H_SET_MODE (resource = 2).
 * NOTE(review): "plapr" looks like a typo for "plpar", but the name is the
 * external interface and is kept for compatibility with existing callers.
 */
static inline long plapr_set_watchpoint0(unsigned long dawr0, unsigned long dawrx0)
{
	long rc;

	rc = plpar_set_mode(0, 2, dawr0, dawrx0);
	return rc;
}
325
Deepthi Dharwar212bebb2013-08-22 15:23:52 +0530326#endif /* _ASM_POWERPC_PLPAR_WRAPPERS_H */