#ifndef _ASM_X86_KVM_PARA_H
#define _ASM_X86_KVM_PARA_H

#include <linux/types.h>
#include <asm/hyperv.h>

/* This CPUID returns the signature 'KVMKVMKVM' in ebx, ecx, and edx.  It
 * should be used to determine that a VM is running under KVM.
 */
#define KVM_CPUID_SIGNATURE	0x40000000

/* This CPUID returns a feature bitmap in eax.  Before enabling a particular
 * paravirtualization, the appropriate feature bit should be checked.
 */
#define KVM_CPUID_FEATURES	0x40000001
#define KVM_FEATURE_CLOCKSOURCE		0
#define KVM_FEATURE_NOP_IO_DELAY	1
#define KVM_FEATURE_MMU_OP		2
/* This indicates that the new set of kvmclock MSRs is available.
 * The use of MSRs 0x11 and 0x12 is deprecated.
 */
#define KVM_FEATURE_CLOCKSOURCE2	3
#define KVM_FEATURE_ASYNC_PF		4
#define KVM_FEATURE_STEAL_TIME		5
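
/*
 * Illustrative sketch (not part of the ABI defined here): before relying on
 * one of the paravirt features above, a guest-side caller would typically
 * test the corresponding bit in the KVM_CPUID_FEATURES leaf, e.g.
 *
 *	if (cpuid_eax(KVM_CPUID_FEATURES) & (1 << KVM_FEATURE_STEAL_TIME))
 *		... enable steal time accounting ...
 *
 * cpuid_eax() is kernel-only; see kvm_arch_para_features() below.
 */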

/* The last 8 bits are used to indicate how to interpret the flags field
 * in pvclock structure. If no bits are set, all flags are ignored.
 */
#define KVM_FEATURE_CLOCKSOURCE_STABLE_BIT	24

#define MSR_KVM_WALL_CLOCK  0x11
#define MSR_KVM_SYSTEM_TIME 0x12
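
/*
 * Illustrative sketch, not a definition from this header: kvmclock is enabled
 * per vcpu by writing the guest physical address of a struct
 * pvclock_vcpu_time_info (<asm/pvclock-abi.h>), with bit 0 used as the enable
 * bit, to MSR_KVM_SYSTEM_TIME (or MSR_KVM_SYSTEM_TIME_NEW below when
 * KVM_FEATURE_CLOCKSOURCE2 is advertised), roughly:
 *
 *	wrmsrl(MSR_KVM_SYSTEM_TIME_NEW, __pa(&hv_clock) | 1);
 *
 * cf. kvm_register_clock(), declared in the kernel-only section below.
 */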

#define KVM_MSR_ENABLED 1
/* Custom MSRs fall in the range 0x4b564d00-0x4b564dff */
#define MSR_KVM_WALL_CLOCK_NEW  0x4b564d00
#define MSR_KVM_SYSTEM_TIME_NEW 0x4b564d01
#define MSR_KVM_ASYNC_PF_EN 0x4b564d02
#define MSR_KVM_STEAL_TIME  0x4b564d03

struct kvm_steal_time {
	__u64 steal;
	__u32 version;
	__u32 flags;
	__u32 pad[12];
};
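
/*
 * Illustrative sketch (the enabling logic lives in arch/x86/kernel/kvm.c, not
 * in this header): a guest turns on steal time accounting by writing the
 * guest physical address of a 64-byte aligned struct kvm_steal_time, with
 * KVM_MSR_ENABLED set in bit 0, to MSR_KVM_STEAL_TIME, roughly:
 *
 *	wrmsrl(MSR_KVM_STEAL_TIME, __pa(&st) | KVM_MSR_ENABLED);
 *
 * The hypervisor then keeps 'steal' (time in nanoseconds during which the
 * vcpu did not run) up to date, bumping 'version' around each update.
 */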

#define KVM_MAX_MMU_OP_BATCH           32

#define KVM_ASYNC_PF_ENABLED			(1 << 0)
#define KVM_ASYNC_PF_SEND_ALWAYS		(1 << 1)

/* Operations for KVM_HC_MMU_OP */
#define KVM_MMU_OP_WRITE_PTE            1
#define KVM_MMU_OP_FLUSH_TLB	        2
#define KVM_MMU_OP_RELEASE_PT	        3

/* Payload for KVM_HC_MMU_OP */
struct kvm_mmu_op_header {
	__u32 op;
	__u32 pad;
};

struct kvm_mmu_op_write_pte {
	struct kvm_mmu_op_header header;
	__u64 pte_phys;
	__u64 pte_val;
};

struct kvm_mmu_op_flush_tlb {
	struct kvm_mmu_op_header header;
};

struct kvm_mmu_op_release_pt {
	struct kvm_mmu_op_header header;
	__u64 pt_phys;
};
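
/*
 * Illustrative sketch (KVM_HC_MMU_OP itself is defined in <linux/kvm_para.h>):
 * a batched MMU operation is described by one of the payload structures above
 * and handed to the hypervisor by guest physical address, roughly:
 *
 *	struct kvm_mmu_op_write_pte wpte = {
 *		.header.op = KVM_MMU_OP_WRITE_PTE,
 *		.pte_phys  = pte_phys,
 *		.pte_val   = pte_val,
 *	};
 *	kvm_hypercall3(KVM_HC_MMU_OP, sizeof(wpte), __pa(&wpte), 0);
 *
 * See the kvm_hypercallN() helpers below for the calling convention.
 */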

#define KVM_PV_REASON_PAGE_NOT_PRESENT 1
#define KVM_PV_REASON_PAGE_READY 2

struct kvm_vcpu_pv_apf_data {
	__u32 reason;
	__u8 pad[60];
	__u32 enabled;
};
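
/*
 * Illustrative sketch (the enabling logic lives in arch/x86/kernel/kvm.c):
 * asynchronous page faults are enabled by writing the guest physical address
 * of a 64-byte aligned struct kvm_vcpu_pv_apf_data, together with the
 * KVM_ASYNC_PF_* flag bits above, to MSR_KVM_ASYNC_PF_EN, roughly:
 *
 *	wrmsrl(MSR_KVM_ASYNC_PF_EN, __pa(&apf_data) | KVM_ASYNC_PF_ENABLED);
 *
 * The hypervisor reports KVM_PV_REASON_PAGE_NOT_PRESENT or
 * KVM_PV_REASON_PAGE_READY in 'reason' before injecting the corresponding
 * page fault, with a token identifying the fault in CR2.
 */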

#ifdef __KERNEL__
#include <asm/processor.h>

extern void kvmclock_init(void);
extern int kvm_register_clock(char *txt);

/* This instruction is vmcall.  On non-VT architectures, it will generate a
 * trap that we will then rewrite to the appropriate instruction.
 */
#define KVM_HYPERCALL ".byte 0x0f,0x01,0xc1"

/* For KVM hypercalls, a three-byte sequence of either the vmcall or the vmmcall
 * instruction.  The hypervisor may replace it with something else but only the
 * instructions are guaranteed to be supported.
 *
 * Up to four arguments may be passed in rbx, rcx, rdx, and rsi respectively.
 * The hypercall number should be placed in rax and the return value will be
 * placed in rax.  No other registers will be clobbered unless explicitly
 * noted by the particular hypercall.
 */

static inline long kvm_hypercall0(unsigned int nr)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall1(unsigned int nr, unsigned long p1)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall2(unsigned int nr, unsigned long p1,
				  unsigned long p2)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall3(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3)
		     : "memory");
	return ret;
}

static inline long kvm_hypercall4(unsigned int nr, unsigned long p1,
				  unsigned long p2, unsigned long p3,
				  unsigned long p4)
{
	long ret;
	asm volatile(KVM_HYPERCALL
		     : "=a"(ret)
		     : "a"(nr), "b"(p1), "c"(p2), "d"(p3), "S"(p4)
		     : "memory");
	return ret;
}
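
/*
 * Usage sketch (illustrative; the KVM_HC_* hypercall numbers and the
 * KVM_ENOSYS error code live in <linux/kvm_para.h>, not here):
 *
 *	long ret = kvm_hypercall0(KVM_HC_VAPIC_POLL_IRQ);
 *	if (ret == -KVM_ENOSYS)
 *		... the host does not implement this hypercall ...
 */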

static inline int kvm_para_available(void)
{
	unsigned int eax, ebx, ecx, edx;
	char signature[13];

	cpuid(KVM_CPUID_SIGNATURE, &eax, &ebx, &ecx, &edx);
	memcpy(signature + 0, &ebx, 4);
	memcpy(signature + 4, &ecx, 4);
	memcpy(signature + 8, &edx, 4);
	signature[12] = 0;

	if (strcmp(signature, "KVMKVMKVM") == 0)
		return 1;

	return 0;
}

static inline unsigned int kvm_arch_para_features(void)
{
	return cpuid_eax(KVM_CPUID_FEATURES);
}

#ifdef CONFIG_KVM_GUEST
void __init kvm_guest_init(void);
void kvm_async_pf_task_wait(u32 token);
void kvm_async_pf_task_wake(u32 token);
u32 kvm_read_and_reset_pf_reason(void);
#else
#define kvm_guest_init() do { } while (0)
#define kvm_async_pf_task_wait(T) do {} while(0)
#define kvm_async_pf_task_wake(T) do {} while(0)
static inline u32 kvm_read_and_reset_pf_reason(void)
{
	return 0;
}
#endif

#endif /* __KERNEL__ */

#endif /* _ASM_X86_KVM_PARA_H */