/*
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, version 2, as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful,
 * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
 * GNU General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program; if not, write to the Free Software
 * Foundation, 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
 *
 * Copyright IBM Corp. 2008
 *
 * Authors: Hollis Blanchard <hollisb@us.ibm.com>
 */

#ifndef __POWERPC_KVM_PPC_H__
#define __POWERPC_KVM_PPC_H__

/* This file exists just so we can dereference kvm_vcpu, avoiding nested header
 * dependencies. */

#include <linux/mutex.h>
#include <linux/timer.h>
#include <linux/types.h>
#include <linux/kvm_types.h>
#include <linux/kvm_host.h>
#include <linux/bug.h>
#ifdef CONFIG_PPC_BOOK3S
#include <asm/kvm_book3s.h>
#else
#include <asm/kvm_booke.h>
#endif
#ifdef CONFIG_KVM_BOOK3S_64_HANDLER
#include <asm/paca.h>
#endif

enum emulation_result {
        EMULATE_DONE,         /* no further processing */
        EMULATE_DO_MMIO,      /* kvm_run filled with MMIO request */
        EMULATE_DO_DCR,       /* kvm_run filled with DCR request */
        EMULATE_FAIL,         /* can't emulate this instruction */
        EMULATE_AGAIN,        /* something went wrong. go again */
        EMULATE_EXIT_USER,    /* emulation requires exit to user-space */
};

enum instruction_type {
        INST_GENERIC,
        INST_SC,              /* system call */
};

enum xlate_instdata {
        XLATE_INST,           /* translate instruction address */
        XLATE_DATA            /* translate data address */
};

enum xlate_readwrite {
        XLATE_READ,           /* check for read permissions */
        XLATE_WRITE           /* check for write permissions */
};

extern int kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern int __kvmppc_vcpu_run(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu);
extern void kvmppc_handler_highmem(void);

extern void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu);
extern int kvmppc_handle_load(struct kvm_run *run, struct kvm_vcpu *vcpu,
                              unsigned int rt, unsigned int bytes,
                              int is_default_endian);
extern int kvmppc_handle_loads(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               unsigned int rt, unsigned int bytes,
                               int is_default_endian);
extern int kvmppc_handle_store(struct kvm_run *run, struct kvm_vcpu *vcpu,
                               u64 val, unsigned int bytes,
                               int is_default_endian);

extern int kvmppc_load_last_inst(struct kvm_vcpu *vcpu,
                                 enum instruction_type type, u32 *inst);

extern int kvmppc_ld(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                     bool data);
extern int kvmppc_st(struct kvm_vcpu *vcpu, ulong *eaddr, int size, void *ptr,
                     bool data);
extern int kvmppc_emulate_instruction(struct kvm_run *run,
                                      struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu);
extern int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu);
extern void kvmppc_emulate_dec(struct kvm_vcpu *vcpu);
extern u32 kvmppc_get_dec(struct kvm_vcpu *vcpu, u64 tb);
extern void kvmppc_decrementer_func(unsigned long data);
extern int kvmppc_sanity_check(struct kvm_vcpu *vcpu);
extern int kvmppc_subarch_vcpu_init(struct kvm_vcpu *vcpu);
extern void kvmppc_subarch_vcpu_uninit(struct kvm_vcpu *vcpu);

/* Core-specific hooks */

extern void kvmppc_mmu_map(struct kvm_vcpu *vcpu, u64 gvaddr, gpa_t gpaddr,
                           unsigned int gtlb_idx);
extern void kvmppc_mmu_priv_switch(struct kvm_vcpu *vcpu, int usermode);
extern void kvmppc_mmu_switch_pid(struct kvm_vcpu *vcpu, u32 pid);
extern void kvmppc_mmu_destroy(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_init(struct kvm_vcpu *vcpu);
extern int kvmppc_mmu_dtlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern int kvmppc_mmu_itlb_index(struct kvm_vcpu *vcpu, gva_t eaddr);
extern gpa_t kvmppc_mmu_xlate(struct kvm_vcpu *vcpu, unsigned int gtlb_index,
                              gva_t eaddr);
extern void kvmppc_mmu_dtlb_miss(struct kvm_vcpu *vcpu);
extern void kvmppc_mmu_itlb_miss(struct kvm_vcpu *vcpu);
extern int kvmppc_xlate(struct kvm_vcpu *vcpu, ulong eaddr,
                        enum xlate_instdata xlid, enum xlate_readwrite xlrw,
                        struct kvmppc_pte *pte);

extern struct kvm_vcpu *kvmppc_core_vcpu_create(struct kvm *kvm,
                                                unsigned int id);
extern void kvmppc_core_vcpu_free(struct kvm_vcpu *vcpu);
extern int kvmppc_core_vcpu_setup(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_processor_compat(void);
extern int kvmppc_core_vcpu_translate(struct kvm_vcpu *vcpu,
                                      struct kvm_translation *tr);

extern void kvmppc_core_vcpu_load(struct kvm_vcpu *vcpu, int cpu);
extern void kvmppc_core_vcpu_put(struct kvm_vcpu *vcpu);

extern int kvmppc_core_prepare_to_enter(struct kvm_vcpu *vcpu);
extern int kvmppc_core_pending_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_program(struct kvm_vcpu *vcpu, ulong flags);
extern void kvmppc_core_queue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_dequeue_dec(struct kvm_vcpu *vcpu);
extern void kvmppc_core_queue_external(struct kvm_vcpu *vcpu,
                                       struct kvm_interrupt *irq);
extern void kvmppc_core_dequeue_external(struct kvm_vcpu *vcpu);
extern void kvmppc_core_flush_tlb(struct kvm_vcpu *vcpu);
extern int kvmppc_core_check_requests(struct kvm_vcpu *vcpu);

extern int kvmppc_booke_init(void);
extern void kvmppc_booke_exit(void);

extern void kvmppc_core_destroy_mmu(struct kvm_vcpu *vcpu);
extern int kvmppc_kvm_pv(struct kvm_vcpu *vcpu);
extern void kvmppc_map_magic(struct kvm_vcpu *vcpu);

extern long kvmppc_alloc_hpt(struct kvm *kvm, u32 *htab_orderp);
extern long kvmppc_alloc_reset_hpt(struct kvm *kvm, u32 *htab_orderp);
extern void kvmppc_free_hpt(struct kvm *kvm);
extern long kvmppc_prepare_vrma(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem);
extern void kvmppc_map_vrma(struct kvm_vcpu *vcpu,
                            struct kvm_memory_slot *memslot,
                            unsigned long porder);
extern int kvmppc_pseries_do_hcall(struct kvm_vcpu *vcpu);

extern long kvm_vm_ioctl_create_spapr_tce(struct kvm *kvm,
                                struct kvm_create_spapr_tce *args);
extern long kvmppc_h_put_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba, unsigned long tce);
extern long kvmppc_h_get_tce(struct kvm_vcpu *vcpu, unsigned long liobn,
                             unsigned long ioba);
extern struct kvm_rma_info *kvm_alloc_rma(void);
extern void kvm_release_rma(struct kvm_rma_info *ri);
extern struct page *kvm_alloc_hpt(unsigned long nr_pages);
extern void kvm_release_hpt(struct page *page, unsigned long nr_pages);
extern int kvmppc_core_init_vm(struct kvm *kvm);
extern void kvmppc_core_destroy_vm(struct kvm *kvm);
extern void kvmppc_core_free_memslot(struct kvm *kvm,
                                     struct kvm_memory_slot *free,
                                     struct kvm_memory_slot *dont);
extern int kvmppc_core_create_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *slot,
                                      unsigned long npages);
extern int kvmppc_core_prepare_memory_region(struct kvm *kvm,
                                struct kvm_memory_slot *memslot,
                                struct kvm_userspace_memory_region *mem);
extern void kvmppc_core_commit_memory_region(struct kvm *kvm,
                                struct kvm_userspace_memory_region *mem,
                                const struct kvm_memory_slot *old);
extern int kvm_vm_ioctl_get_smmu_info(struct kvm *kvm,
                                      struct kvm_ppc_smmu_info *info);
extern void kvmppc_core_flush_memslot(struct kvm *kvm,
                                      struct kvm_memory_slot *memslot);

extern int kvmppc_bookehv_init(void);
extern void kvmppc_bookehv_exit(void);

extern int kvmppc_prepare_to_enter(struct kvm_vcpu *vcpu);

extern int kvm_vm_ioctl_get_htab_fd(struct kvm *kvm, struct kvm_get_htab_fd *);

int kvm_vcpu_ioctl_interrupt(struct kvm_vcpu *vcpu, struct kvm_interrupt *irq);

extern int kvm_vm_ioctl_rtas_define_token(struct kvm *kvm, void __user *argp);
extern int kvmppc_rtas_hcall(struct kvm_vcpu *vcpu);
extern void kvmppc_rtas_tokens_free(struct kvm *kvm);
extern int kvmppc_xics_set_xive(struct kvm *kvm, u32 irq, u32 server,
                                u32 priority);
extern int kvmppc_xics_get_xive(struct kvm *kvm, u32 irq, u32 *server,
                                u32 *priority);
extern int kvmppc_xics_int_on(struct kvm *kvm, u32 irq);
extern int kvmppc_xics_int_off(struct kvm *kvm, u32 irq);

union kvmppc_one_reg {
        u32     wval;
        u64     dval;
        vector128 vval;
        u64     vsxval[2];
        struct {
                u64     addr;
                u64     length;
        }       vpaval;
};

struct kvmppc_ops {
        struct module *owner;
        int (*get_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*set_sregs)(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
        int (*get_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        int (*set_one_reg)(struct kvm_vcpu *vcpu, u64 id,
                           union kvmppc_one_reg *val);
        void (*vcpu_load)(struct kvm_vcpu *vcpu, int cpu);
        void (*vcpu_put)(struct kvm_vcpu *vcpu);
        void (*set_msr)(struct kvm_vcpu *vcpu, u64 msr);
        int (*vcpu_run)(struct kvm_run *run, struct kvm_vcpu *vcpu);
        struct kvm_vcpu *(*vcpu_create)(struct kvm *kvm, unsigned int id);
        void (*vcpu_free)(struct kvm_vcpu *vcpu);
        int (*check_requests)(struct kvm_vcpu *vcpu);
        int (*get_dirty_log)(struct kvm *kvm, struct kvm_dirty_log *log);
        void (*flush_memslot)(struct kvm *kvm, struct kvm_memory_slot *memslot);
        int (*prepare_memory_region)(struct kvm *kvm,
                                     struct kvm_memory_slot *memslot,
                                     struct kvm_userspace_memory_region *mem);
        void (*commit_memory_region)(struct kvm *kvm,
                                     struct kvm_userspace_memory_region *mem,
                                     const struct kvm_memory_slot *old);
        int (*unmap_hva)(struct kvm *kvm, unsigned long hva);
        int (*unmap_hva_range)(struct kvm *kvm, unsigned long start,
                               unsigned long end);
        int (*age_hva)(struct kvm *kvm, unsigned long hva);
        int (*test_age_hva)(struct kvm *kvm, unsigned long hva);
        void (*set_spte_hva)(struct kvm *kvm, unsigned long hva, pte_t pte);
        void (*mmu_destroy)(struct kvm_vcpu *vcpu);
        void (*free_memslot)(struct kvm_memory_slot *free,
                             struct kvm_memory_slot *dont);
        int (*create_memslot)(struct kvm_memory_slot *slot,
                              unsigned long npages);
        int (*init_vm)(struct kvm *kvm);
        void (*destroy_vm)(struct kvm *kvm);
        int (*get_smmu_info)(struct kvm *kvm, struct kvm_ppc_smmu_info *info);
        int (*emulate_op)(struct kvm_run *run, struct kvm_vcpu *vcpu,
                          unsigned int inst, int *advance);
        int (*emulate_mtspr)(struct kvm_vcpu *vcpu, int sprn, ulong spr_val);
        int (*emulate_mfspr)(struct kvm_vcpu *vcpu, int sprn, ulong *spr_val);
        void (*fast_vcpu_kick)(struct kvm_vcpu *vcpu);
        long (*arch_vm_ioctl)(struct file *filp, unsigned int ioctl,
                              unsigned long arg);
        int (*hcall_implemented)(unsigned long hcall);
};

extern struct kvmppc_ops *kvmppc_hv_ops;
extern struct kvmppc_ops *kvmppc_pr_ops;

static inline int kvmppc_get_last_inst(struct kvm_vcpu *vcpu,
                                       enum instruction_type type, u32 *inst)
{
        int ret = EMULATE_DONE;
        u32 fetched_inst;

        /* Load the instruction manually if it failed to do so in the
         * exit path */
        if (vcpu->arch.last_inst == KVM_INST_FETCH_FAILED)
                ret = kvmppc_load_last_inst(vcpu, type, &vcpu->arch.last_inst);

        /* Write fetch_failed unswapped if the fetch failed */
        if (ret == EMULATE_DONE)
                fetched_inst = kvmppc_need_byteswap(vcpu) ?
                               swab32(vcpu->arch.last_inst) :
                               vcpu->arch.last_inst;
        else
                fetched_inst = vcpu->arch.last_inst;

        *inst = fetched_inst;
        return ret;
}
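
/*
 * Illustrative only (not part of the original header): an emulation path
 * would typically use the helper above along these lines, propagating any
 * result other than EMULATE_DONE back to its caller:
 *
 *      u32 inst;
 *      int emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
 *
 *      if (emulated != EMULATE_DONE)
 *              return emulated;
 *      ... decode and emulate "inst" ...
 */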

static inline bool is_kvmppc_hv_enabled(struct kvm *kvm)
{
        return kvm->arch.kvm_ops == kvmppc_hv_ops;
}

/*
 * Cuts out inst bits with ordering according to spec.
 * That means the leftmost bit is zero. All given bits are included.
 */
static inline u32 kvmppc_get_field(u64 inst, int msb, int lsb)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = (1 << (lsb - msb + 1)) - 1;
        r = (inst >> (63 - lsb)) & mask;

        return r;
}

/*
 * Replaces inst bits with ordering according to spec.
 */
static inline u32 kvmppc_set_field(u64 inst, int msb, int lsb, int value)
{
        u32 r;
        u32 mask;

        BUG_ON(msb > lsb);

        mask = ((1 << (lsb - msb + 1)) - 1) << (63 - lsb);
        r = (inst & ~mask) | ((value << (63 - lsb)) & mask);

        return r;
}
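
/*
 * Illustrative only: kvmppc_get_field()/kvmppc_set_field() use the ISA's
 * big-endian bit numbering, where bit 0 is the most significant bit of the
 * 64-bit value. A 32-bit instruction held in the low word therefore has its
 * spec bit N at position 32 + N, so extracting the RT field (spec bits 6-10)
 * looks like:
 *
 *      rt = kvmppc_get_field(inst, 32 + 6, 32 + 10);
 *
 * which is equivalent to (inst >> 21) & 0x1f.
 */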

#define one_reg_size(id)	\
	(1ul << (((id) & KVM_REG_SIZE_MASK) >> KVM_REG_SIZE_SHIFT))

#define get_reg_val(id, reg)	({				\
	union kvmppc_one_reg __u;				\
	switch (one_reg_size(id)) {				\
	case 4: __u.wval = (reg); break;			\
	case 8: __u.dval = (reg); break;			\
	default: BUG();						\
	}							\
	__u;							\
})


#define set_reg_val(id, val)	({				\
	u64 __v;						\
	switch (one_reg_size(id)) {				\
	case 4: __v = (val).wval; break;			\
	case 8: __v = (val).dval; break;			\
	default: BUG();						\
	}							\
	__v;							\
})
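
/*
 * Illustrative only: ONE_REG get/set handlers use these macros to marshal a
 * register to or from the size encoded in the register id. With "spr_val"
 * standing for whichever vcpu field the id selects (a hypothetical name):
 *
 *      union kvmppc_one_reg val = get_reg_val(reg->id, spr_val);
 *      ...
 *      spr_val = set_reg_val(reg->id, val);
 */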

int kvmppc_core_get_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_core_set_sregs(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvmppc_get_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);
int kvmppc_set_sregs_ivor(struct kvm_vcpu *vcpu, struct kvm_sregs *sregs);

int kvm_vcpu_ioctl_get_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvm_vcpu_ioctl_set_one_reg(struct kvm_vcpu *vcpu, struct kvm_one_reg *reg);
int kvmppc_get_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);
int kvmppc_set_one_reg(struct kvm_vcpu *vcpu, u64 id, union kvmppc_one_reg *);

void kvmppc_set_pid(struct kvm_vcpu *vcpu, u32 pid);

struct openpic;

#ifdef CONFIG_KVM_BOOK3S_HV_POSSIBLE
extern void kvm_cma_reserve(void) __init;
static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{
        paca[cpu].kvm_hstate.xics_phys = addr;
}

static inline u32 kvmppc_get_xics_latch(void)
{
        u32 xirr;

        xirr = get_paca()->kvm_hstate.saved_xirr;
        get_paca()->kvm_hstate.saved_xirr = 0;
        return xirr;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{
        paca[cpu].kvm_hstate.host_ipi = host_ipi;
}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        vcpu->kvm->arch.kvm_ops->fast_vcpu_kick(vcpu);
}

extern void kvm_hv_vm_activated(void);
extern void kvm_hv_vm_deactivated(void);
extern bool kvm_hv_mode_active(void);

#else
static inline void __init kvm_cma_reserve(void)
{}

static inline void kvmppc_set_xics_phys(int cpu, unsigned long addr)
{}

static inline u32 kvmppc_get_xics_latch(void)
{
        return 0;
}

static inline void kvmppc_set_host_ipi(int cpu, u8 host_ipi)
{}

static inline void kvmppc_fast_vcpu_kick(struct kvm_vcpu *vcpu)
{
        kvm_vcpu_kick(vcpu);
}

static inline bool kvm_hv_mode_active(void)	{ return false; }

#endif

#ifdef CONFIG_KVM_XICS
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
{
        return vcpu->arch.irq_type == KVMPPC_IRQ_XICS;
}
extern void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu, unsigned long server);
extern int kvm_vm_ioctl_xics_irq(struct kvm *kvm, struct kvm_irq_level *args);
extern int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd);
extern u64 kvmppc_xics_get_icp(struct kvm_vcpu *vcpu);
extern int kvmppc_xics_set_icp(struct kvm_vcpu *vcpu, u64 icpval);
extern int kvmppc_xics_connect_vcpu(struct kvm_device *dev,
                                    struct kvm_vcpu *vcpu, u32 cpu);
#else
static inline int kvmppc_xics_enabled(struct kvm_vcpu *vcpu)
        { return 0; }
static inline void kvmppc_xics_free_icp(struct kvm_vcpu *vcpu) { }
static inline int kvmppc_xics_create_icp(struct kvm_vcpu *vcpu,
                                         unsigned long server)
        { return -EINVAL; }
static inline int kvm_vm_ioctl_xics_irq(struct kvm *kvm,
                                        struct kvm_irq_level *args)
        { return -ENOTTY; }
static inline int kvmppc_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
        { return 0; }
#endif

static inline unsigned long kvmppc_get_epr(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_KVM_BOOKE_HV
        return mfspr(SPRN_GEPR);
#elif defined(CONFIG_BOOKE)
        return vcpu->arch.epr;
#else
        return 0;
#endif
}

static inline void kvmppc_set_epr(struct kvm_vcpu *vcpu, u32 epr)
{
#ifdef CONFIG_KVM_BOOKE_HV
        mtspr(SPRN_GEPR, epr);
#elif defined(CONFIG_BOOKE)
        vcpu->arch.epr = epr;
#endif
}

#ifdef CONFIG_KVM_MPIC

void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu);
int kvmppc_mpic_connect_vcpu(struct kvm_device *dev, struct kvm_vcpu *vcpu,
                             u32 cpu);
void kvmppc_mpic_disconnect_vcpu(struct openpic *opp, struct kvm_vcpu *vcpu);

#else

static inline void kvmppc_mpic_set_epr(struct kvm_vcpu *vcpu)
{
}

static inline int kvmppc_mpic_connect_vcpu(struct kvm_device *dev,
                                           struct kvm_vcpu *vcpu, u32 cpu)
{
        return -EINVAL;
}

static inline void kvmppc_mpic_disconnect_vcpu(struct openpic *opp,
                                               struct kvm_vcpu *vcpu)
{
}

#endif /* CONFIG_KVM_MPIC */

int kvm_vcpu_ioctl_config_tlb(struct kvm_vcpu *vcpu,
                              struct kvm_config_tlb *cfg);
int kvm_vcpu_ioctl_dirty_tlb(struct kvm_vcpu *vcpu,
                             struct kvm_dirty_tlb *cfg);

long kvmppc_alloc_lpid(void);
void kvmppc_claim_lpid(long lpid);
void kvmppc_free_lpid(long lpid);
void kvmppc_init_lpid(unsigned long nr_lpids);

static inline void kvmppc_mmu_flush_icache(pfn_t pfn)
{
        struct page *page;
        /*
         * We can only access pages that the kernel maps
         * as memory. Bail out for unmapped ones.
         */
        if (!pfn_valid(pfn))
                return;

        /* Clear i-cache for new pages */
        page = pfn_to_page(pfn);
        if (!test_bit(PG_arch_1, &page->flags)) {
                flush_dcache_icache_page(page);
                set_bit(PG_arch_1, &page->flags);
        }
}

/*
 * Shared struct helpers. The shared struct can be little or big endian,
 * depending on the guest endianness. So expose helpers to all of them.
 */
static inline bool kvmppc_shared_big_endian(struct kvm_vcpu *vcpu)
{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
        /* Only Book3S_64 PR supports bi-endian for now */
        return vcpu->arch.shared_big_endian;
#elif defined(CONFIG_PPC_BOOK3S_64) && defined(__LITTLE_ENDIAN__)
        /* Book3s_64 HV on little endian is always little endian */
        return false;
#else
        return true;
#endif
}

#define SPRNG_WRAPPER_GET(reg, e500hv_spr)				\
static inline ulong kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	return mfspr(e500hv_spr);					\
}									\

#define SPRNG_WRAPPER_SET(reg, e500hv_spr)				\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, ulong val)	\
{									\
	mtspr(e500hv_spr, val);						\
}									\

#define SHARED_WRAPPER_GET(reg, size)					\
static inline u##size kvmppc_get_##reg(struct kvm_vcpu *vcpu)		\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		return be##size##_to_cpu(vcpu->arch.shared->reg);	\
	else								\
		return le##size##_to_cpu(vcpu->arch.shared->reg);	\
}									\

#define SHARED_WRAPPER_SET(reg, size)					\
static inline void kvmppc_set_##reg(struct kvm_vcpu *vcpu, u##size val)	\
{									\
	if (kvmppc_shared_big_endian(vcpu))				\
		vcpu->arch.shared->reg = cpu_to_be##size(val);		\
	else								\
		vcpu->arch.shared->reg = cpu_to_le##size(val);		\
}									\

#define SHARED_WRAPPER(reg, size)					\
	SHARED_WRAPPER_GET(reg, size)					\
	SHARED_WRAPPER_SET(reg, size)					\

#define SPRNG_WRAPPER(reg, e500hv_spr)					\
	SPRNG_WRAPPER_GET(reg, e500hv_spr)				\
	SPRNG_WRAPPER_SET(reg, e500hv_spr)				\

#ifdef CONFIG_KVM_BOOKE_HV

#define SHARED_SPRNG_WRAPPER(reg, size, e500hv_spr)			\
	SPRNG_WRAPPER(reg, e500hv_spr)					\

#else

#define SHARED_SPRNG_WRAPPER(reg, size, e500hv_spr)			\
	SHARED_WRAPPER(reg, size)					\

#endif

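/*
 * Note (added for clarity): each instantiation below generates a
 * kvmppc_get_<reg>()/kvmppc_set_<reg>() accessor pair. For example,
 * SHARED_WRAPPER(sprg4, 64) yields kvmppc_get_sprg4()/kvmppc_set_sprg4(),
 * which byte-swap the shared-page field according to
 * kvmppc_shared_big_endian(). With CONFIG_KVM_BOOKE_HV, the SHARED_SPRNG
 * instantiations access the corresponding guest SPR (e.g. SPRN_GSPRG0)
 * directly instead of the shared page.
 */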
SHARED_WRAPPER(critical, 64)
SHARED_SPRNG_WRAPPER(sprg0, 64, SPRN_GSPRG0)
SHARED_SPRNG_WRAPPER(sprg1, 64, SPRN_GSPRG1)
SHARED_SPRNG_WRAPPER(sprg2, 64, SPRN_GSPRG2)
SHARED_SPRNG_WRAPPER(sprg3, 64, SPRN_GSPRG3)
SHARED_SPRNG_WRAPPER(srr0, 64, SPRN_GSRR0)
SHARED_SPRNG_WRAPPER(srr1, 64, SPRN_GSRR1)
SHARED_SPRNG_WRAPPER(dar, 64, SPRN_GDEAR)
SHARED_SPRNG_WRAPPER(esr, 64, SPRN_GESR)
SHARED_WRAPPER_GET(msr, 64)
static inline void kvmppc_set_msr_fast(struct kvm_vcpu *vcpu, u64 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->msr = cpu_to_be64(val);
        else
                vcpu->arch.shared->msr = cpu_to_le64(val);
}
SHARED_WRAPPER(dsisr, 32)
SHARED_WRAPPER(int_pending, 32)
SHARED_WRAPPER(sprg4, 64)
SHARED_WRAPPER(sprg5, 64)
SHARED_WRAPPER(sprg6, 64)
SHARED_WRAPPER(sprg7, 64)

static inline u32 kvmppc_get_sr(struct kvm_vcpu *vcpu, int nr)
{
        if (kvmppc_shared_big_endian(vcpu))
                return be32_to_cpu(vcpu->arch.shared->sr[nr]);
        else
                return le32_to_cpu(vcpu->arch.shared->sr[nr]);
}

static inline void kvmppc_set_sr(struct kvm_vcpu *vcpu, int nr, u32 val)
{
        if (kvmppc_shared_big_endian(vcpu))
                vcpu->arch.shared->sr[nr] = cpu_to_be32(val);
        else
                vcpu->arch.shared->sr[nr] = cpu_to_le32(val);
}

/*
 * Please call after prepare_to_enter. This function puts the lazy ee and irq
 * disabled tracking state back to normal mode, without actually enabling
 * interrupts.
 */
static inline void kvmppc_fix_ee_before_entry(void)
{
        trace_hardirqs_on();

#ifdef CONFIG_PPC64
        /*
         * To avoid races, the caller must have gone directly from having
         * interrupts fully-enabled to hard-disabled.
         */
        WARN_ON(local_paca->irq_happened != PACA_IRQ_HARD_DIS);

        /* Only need to enable IRQs by hard enabling them after this */
        local_paca->irq_happened = 0;
        local_paca->soft_enabled = 1;
#endif
}

static inline ulong kvmppc_get_ea_indexed(struct kvm_vcpu *vcpu, int ra, int rb)
{
        ulong ea;
        ulong msr_64bit = 0;

        ea = kvmppc_get_gpr(vcpu, rb);
        if (ra)
                ea += kvmppc_get_gpr(vcpu, ra);

#if defined(CONFIG_PPC_BOOK3E_64)
        msr_64bit = MSR_CM;
#elif defined(CONFIG_PPC_BOOK3S_64)
        msr_64bit = MSR_SF;
#endif

        if (!(kvmppc_get_msr(vcpu) & msr_64bit))
                ea = (uint32_t)ea;

        return ea;
}
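
/*
 * Illustrative only: for an indexed-form access such as "lwzx rt, ra, rb",
 * an emulator would compute the effective address roughly as
 *
 *      ea = kvmppc_get_ea_indexed(vcpu, get_ra(inst), get_rb(inst));
 *
 * i.e. GPR[rb], plus GPR[ra] when ra != 0, truncated to 32 bits when the
 * guest is not in 64-bit mode (MSR_CM/MSR_SF clear). get_ra()/get_rb() here
 * refer to the instruction decode helpers in asm/disassemble.h.
 */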

extern void xics_wake_cpu(int cpu);

#endif /* __POWERPC_KVM_PPC_H__ */