/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
        EMULATE_DO_DCR,       /* kvm_run filled with DCR request */
        EMULATE_FAIL,         /* can't emulate this instruction */
        EMULATE_AGAIN,        /* something went wrong. go again */
        EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};
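
/*
 * Illustrative only: an exit handler is expected to dispatch on these
 * results roughly as follows ("r" and the RESUME_* policy here are a
 * sketch, not code from this header):
 *
 *	switch (kvmppc_emulate_instruction(run, vcpu)) {
 *	case EMULATE_DONE:
 *		r = RESUME_GUEST;	// handled entirely in the kernel
 *		break;
 *	case EMULATE_DO_MMIO:
 *		r = RESUME_HOST;	// kvm_run now describes the MMIO
 *		break;
 *	case EMULATE_FAIL:
 *		kvmppc_core_queue_program(vcpu, 0);	// reflect to guest
 *		r = RESUME_GUEST;
 *		break;
 *	}
 */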

enum instruction_type {
        INST_GENERIC,
        INST_SC,                /* system call */
};

enum xlate_instdata {
        XLATE_INST,             /* translate instruction address */
        XLATE_DATA              /* translate data address */
};

enum xlate_readwrite {
        XLATE_READ,             /* check for read permissions */
        XLATE_WRITE             /* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
                              int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
                               int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               u64 val, unsigned int bytes,
                               int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                                 enum instruction_type type, u32 *inst);

extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(unsigned long data);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
                        enum xlate_instdata xlid, enum xlate_readwrite xlrw,
                        struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
                            struct kvm_memory_slot *memslot,
                            unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                          struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba);
extern struct kvm_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
                                     struct kvm_memory_slot *free,
                                     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *slot,
                                      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
                                      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

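/*
 * Value buffer for the ONE_REG ioctls (KVM_GET_ONE_REG/KVM_SET_ONE_REG);
 * see the get_reg_val()/set_reg_val() helpers and the
 * kvmppc_get_one_reg()/kvmppc_set_one_reg() declarations below.
 */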
union kvmppc_one_reg {
        u32     wval;
        u64     dval;
        vector128 vval;
        u64     vsxval[2];
        struct {
                u64     addr;
                u64     length;
        }       vpaval;
};
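
/*
 * Per-implementation hook table.  Each flavour of KVM on PPC (e.g. Book3S
 * HV and Book3S PR) supplies one of these; generic code dispatches through
 * vcpu->kvm->arch.kvm_ops, as in is_kvmppc_hv_enabled() and
 * kvmppc_fast_vcpu_kick() below.
 */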
struct kvmppc_ops {
        struct module *owner;
        int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        int (*check_requests)(struct kvm_vcpu *vcpu);
        int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
        void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
        int (*prepare_memory_region)(struct kvm *kvm,
                                     struct kvm_memory_slot *memslot,
                                     struct kvm_userspace_memory_region *mem);
        void (*commit_memory_region)(struct kvm *kvm,
                                     struct kvm_userspace_memory_region *mem,
                                     const struct kvm_memory_slot *old);
        int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
        int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
                               unsigned long end);
        int (*age_hva)(struct kvm *kvm, unsigned long hva);
        int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
        void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
        void (*mmu_destroy)(struct kvm_vcpu *vcpu);
        void (*free_memslot)(struct kvm_memory_slot *free,
                             struct kvm_memory_slot *dont);
        int (*create_memslot)(struct kvm_memory_slot *slot,
                              unsigned long npages);
        int (*init_vm)(struct kvm *kvm);
        void (*destroy_vm)(struct kvm *kvm);
        int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
        int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int inst, int *advance);
        int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
        int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
        void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
        long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
                              unsigned long arg);
        int (*hcall_implemented)(unsigned long hcall);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
                                       enum instruction_type type, u32 *inst)
{
        int ret = EMULATE_DONE;
        u32 fetched_inst;

        /* Load the instruction manually if the exit path failed to do so */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

        /* Write fetch_failed unswapped if the fetch failed */
        if (ret == EMULATE_DONE)
                fetched_inst = kvmppc_need_byteswap(vcpu) ?
                               swab32(vcpu->arch.last_inst) :
                               vcpu->arch.last_inst;
        else
                fetched_inst = vcpu->arch.last_inst;

        *inst = fetched_inst;
        return ret;
}
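
/*
 * Illustrative only: an emulation path is expected to call this roughly as
 * follows (the surrounding control flow is a sketch, not code from this
 * header):
 *
 *	u32 inst;
 *
 *	if (kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst) != EMULATE_DONE)
 *		return EMULATE_AGAIN;	// fetch failed, re-enter the guest
 *	// ... decode and emulate inst ...
 */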

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
        return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

/*
 * Extracts inst bits numbered according to the Power ISA convention, where
 * bit 0 is the leftmost (most significant) bit.  Both the msb and lsb bits
 * are included in the result.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = (1 << (lsb - msb + 1)) - 1;
        r = (inst >> (63 - lsb)) & mask;

        return r;
}

/*
 * Replaces inst bits numbered according to the same convention.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
        r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

        return r;
}
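
/*
 * Illustrative only: callers typically hold a 32-bit instruction in the low
 * word of the u64 and add 32 to the Power ISA bit numbers, so extracting
 * the RT field (instruction bits 6-10) of a hypothetical "inst" would be:
 *
 *	u32 rt = kvmppc_get_field(inst, 6 + 32, 10 + 32);
 */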

#define one_reg_size(id)        \
        (1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)    ({              \
        union kvmppc_one_reg __u;               \
        switch (one_reg_size(id)) {             \
        case 4: __u.wval = (reg); break;        \
        case 8: __u.dval = (reg); break;        \
        default: BUG();                         \
        }                                       \
        __u;                                    \
})

#define set_reg_val(id, val)    ({              \
        u64 __v;                                \
        switch (one_reg_size(id)) {             \
        case 4: __v = (val).wval; break;        \
        case 8: __v = (val).dval; break;        \
        default: BUG();                         \
        }                                       \
        __v;                                    \
})

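/*
 * Illustrative only: a get_one_reg() implementation might use these helpers
 * like this, where KVM_REG_PPC_FOO and the "foo" field are hypothetical:
 *
 *	case KVM_REG_PPC_FOO:
 *		val = get_reg_val(id, vcpu->arch.foo);
 *		break;
 *
 * one_reg_size() decodes the size encoded in the ONE_REG id itself; a
 * KVM_REG_SIZE_U64 id yields 8, selecting the dval case.
 */
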
int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
        paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
        u32 xirr;

        xirr = get_paca()->kvm_hstate.saved_xirr;
        get_paca()->kvm_hstate.saved_xirr = 0;
        return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
        paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
        return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)  { return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
                                    struct kvm_vcpu *vcpu, u32 cpu);
#else
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
        { return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
                                         unsigned long server)
        { return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
                                        struct kvm_irq_level *args)
        { return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
        { return 0; }
#endif

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
        return vcpu->arch.epr;
#else
        return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
        vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
                             u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
                                           struct kvm_vcpu *vcpu, u32 cpu)
{
        return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
                                               struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
{
        struct page *page;
        /*
         * We can only access pages that the kernel maps
         * as memory. Bail out for unmapped ones.
         */
        if (!pfn_valid(pfn))
                return;

        /* Clear i-cache for new pages */
        page = pfn_to_page(pfn);
        if (!test_bit(PG_arch_1, &page->flags)) {
                flush_dcache_icache_page(page);
                set_bit(PG_arch_1, &page->flags);
        }
}

/*
 * Shared struct helpers.  The shared struct can be little or big endian,
 * depending on the guest endianness, so these helpers apply the right
 * byte swap for every access to it.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        /* Only Book3S_64 PR supports bi-endian for now */
        return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
        /* Book3s_64 HV on little endian is always little endian */
        return false;
#else
        return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, e500hv_spr)                              \
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)             \
{                                                                       \
        return mfspr(e500hv_spr);                                       \
}                                                                       \

#define SPRNG_WRAPPER_SET(reg, e500hv_spr)                              \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)   \
{                                                                       \
        mtspr(e500hv_spr, val);                                         \
}                                                                       \

#define SHARED_WRAPPER_GET(reg, size)                                   \
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)           \
{                                                                       \
        if (kvmppc_shared_big_endian(vcpu))                             \
                return be##size##_to_cpu(vcpu->arch.shared->reg);       \
        else                                                            \
                return le##size##_to_cpu(vcpu->arch.shared->reg);       \
}                                                                       \

#define SHARED_WRAPPER_SET(reg, size)                                   \
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val) \
{                                                                       \
        if (kvmppc_shared_big_endian(vcpu))                             \
                vcpu->arch.shared->reg = cpu_to_be##size(val);          \
        else                                                            \
                vcpu->arch.shared->reg = cpu_to_le##size(val);          \
}                                                                       \

#define SHARED_WRAPPER(reg, size)                                       \
        SHARED_WRAPPER_GET(reg, size)                                   \
        SHARED_WRAPPER_SET(reg, size)                                   \

#define SPRNG_WRAPPER(reg, e500hv_spr)                                  \
        SPRNG_WRAPPER_GET(reg, e500hv_spr)                              \
        SPRNG_WRAPPER_SET(reg, e500hv_spr)                              \

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, e500hv_spr)                     \
        SPRNG_WRAPPER(reg, e500hv_spr)                                  \

#else

#define SHARED_SPRNG_WRAPPER(reg, size, e500hv_spr)                     \
        SHARED_WRAPPER(reg, size)                                       \

#endif

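/*
 * Each invocation below generates a get/set accessor pair: e.g.
 * SHARED_WRAPPER(sprg4, 64) expands to kvmppc_get_sprg4() and
 * kvmppc_set_sprg4(), which byte-swap the shared-struct field as needed.
 * With CONFIG_KVM_BOOKE_HV, the SHARED_SPRNG_WRAPPER lines instead access
 * the corresponding guest SPR (e.g. SPRN_GSPRG0) directly.
 */
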
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->msr = cpu_to_be64(val);
        else
                vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
        if (kvmppc_shared_big_endian(vcpu))
                return be32_to_cpu(vcpu->arch.shared->sr[nr]);
        else
                return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
        else
                vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter.  This function puts the lazy EE and
 * IRQ-disabled tracking state back to normal mode, without actually
 * enabling interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
        trace_hardirqs_on();

#ifdef CONFIG_PPC64
        /*
         * To avoid races, the caller must have gone directly from having
         * interrupts fully-enabled to hard-disabled.
         */
        WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
        local_paca->soft_enabled = 1;
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
        ulong ea;
        ulong msr_64bit = 0;

        ea = kvmppc_get_gpr(vcpu, rb);
        if (ra)
                ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
        msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
        msr_64bit = MSR_SF;
#endif

        if (!(kvmppc_get_msr(vcpu) & msr_64bit))
                ea = (uint32_t)ea;

        return ea;
}
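
/*
 * Illustrative only: when emulating an X-form (indexed) storage access such
 * as lwzx, the effective address is GPR[rb] plus GPR[ra] (or plus 0 if ra
 * is 0), truncated to 32 bits when the guest is not in 64-bit mode.  Given
 * RA and RB fields already decoded from a hypothetical "inst":
 *
 *	ulong ea = kvmppc_get_ea_indexed(vcpu, ra, rb);
 */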

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */