/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */
#include <linux/kvm_host.h>

#include "kvm_svm.h"
#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"

#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>

#include <asm/desc.h>

#include <asm/virtext.h>

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define DR7_GD_MASK (1 << 13)
#define DR6_BD_MASK (1 << 13)

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT (1 << 0)
#define SVM_FEATURE_LBRV (1 << 1)
#define SVM_FEATURE_SVML (1 << 2)

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled = false;
#endif
static int npt = 1;

module_param(npt, int, S_IRUGO);

static void kvm_reput_irq(struct vcpu_svm *svm);
static void svm_flush_tlb(struct kvm_vcpu *vcpu);

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1 : 8, type : 5, dpl : 2, p : 1;
	unsigned limit1 : 4, zero0 : 3, g : 1, base2 : 8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);
static uint32_t svm_features;

struct svm_init_data {
	int cpu;
	int r;
};

static u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

#define MAX_INST_SIZE 15

static inline u32 svm_has(u32 feat)
{
	return svm_features & feat;
}

static inline u8 pop_irq(struct kvm_vcpu *vcpu)
{
	int word_index = __ffs(vcpu->arch.irq_summary);
	int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
	int irq = word_index * BITS_PER_LONG + bit_index;

	clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
	if (!vcpu->arch.irq_pending[word_index])
		clear_bit(word_index, &vcpu->arch.irq_summary);
	return irq;
}

static inline void push_irq(struct kvm_vcpu *vcpu, u8 irq)
{
	set_bit(irq, vcpu->arch.irq_pending);
	set_bit(irq / BITS_PER_LONG, &vcpu->arch.irq_summary);
}

static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) :: "a"(addr), "c"(asid));
}

static inline unsigned long kvm_read_cr2(void)
{
	unsigned long cr2;

	asm volatile ("mov %%cr2, %0" : "=r" (cr2));
	return cr2;
}

static inline void kvm_write_cr2(unsigned long val)
{
	asm volatile ("mov %0, %%cr2" :: "r" (val));
}

static inline unsigned long read_dr6(void)
{
	unsigned long dr6;

	asm volatile ("mov %%dr6, %0" : "=r" (dr6));
	return dr6;
}

static inline void write_dr6(unsigned long val)
{
	asm volatile ("mov %0, %%dr6" :: "r" (val));
}

static inline unsigned long read_dr7(void)
{
	unsigned long dr7;

	asm volatile ("mov %%dr7, %0" : "=r" (dr7));
	return dr7;
}

static inline void write_dr7(unsigned long val)
{
	asm volatile ("mov %0, %%dr7" :: "r" (val));
}

static inline void force_new_asid(struct kvm_vcpu *vcpu)
{
	to_svm(vcpu)->asid_generation--;
}

static inline void flush_guest_tlb(struct kvm_vcpu *vcpu)
{
	force_new_asid(vcpu);
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | MSR_EFER_SVME_MASK;
	vcpu->arch.shadow_efer = efer;
}

static void svm_queue_exception(struct kvm_vcpu *vcpu, unsigned nr,
				bool has_error_code, u32 error_code)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static bool svm_exception_injected(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	return !(svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

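/*
 * Advance the guest RIP past the instruction that triggered the current
 * intercept.  next_rip is filled in by the individual intercept handlers;
 * the interrupt shadow is cleared and the interrupt window is marked open.
 */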
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (!svm->next_rip) {
		printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;

	vcpu->arch.interrupt_window_open = 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void *garbage)
{
	cpu_svm_disable();
}

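/*
 * Enable SVM on the current CPU: initialize the per-CPU ASID bookkeeping,
 * set EFER.SVME and point MSR_VM_HSAVE_PA at this CPU's host save area
 * allocated in svm_cpu_init().
 */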
static void svm_hardware_enable(void *garbage)
{

	struct svm_cpu_data *svm_data;
	uint64_t efer;
	struct desc_ptr gdt_descr;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	if (!has_svm()) {
		printk(KERN_ERR "svm_cpu_init: err EOPNOTSUPP on %d\n", me);
		return;
	}
	svm_data = per_cpu(svm_data, me);

	if (!svm_data) {
		printk(KERN_ERR "svm_cpu_init: svm_data is NULL on %d\n",
		       me);
		return;
	}

	svm_data->asid_generation = 1;
	svm_data->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	svm_data->next_asid = svm_data->max_asid + 1;

	asm volatile ("sgdt %0" : "=m"(gdt_descr));
	gdt = (struct desc_struct *)gdt_descr.address;
	svm_data->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	rdmsrl(MSR_EFER, efer);
	wrmsrl(MSR_EFER, efer | MSR_EFER_SVME_MASK);

	wrmsrl(MSR_VM_HSAVE_PA,
	       page_to_pfn(svm_data->save_area) << PAGE_SHIFT);
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *svm_data
		= per_cpu(svm_data, raw_smp_processor_id());

	if (!svm_data)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	__free_page(svm_data->save_area);
	kfree(svm_data);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *svm_data;
	int r;

	svm_data = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!svm_data)
		return -ENOMEM;
	svm_data->cpu = cpu;
	svm_data->save_area = alloc_page(GFP_KERNEL);
	r = -ENOMEM;
	if (!svm_data->save_area)
		goto err_1;

	per_cpu(svm_data, cpu) = svm_data;

	return 0;

err_1:
	kfree(svm_data);
	return r;

}

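/*
 * The MSR permission map holds two bits per MSR: one for reads and one for
 * writes.  A set bit means the access is intercepted; the map starts out
 * all-ones (svm_vcpu_init_msrpm()), so clearing both bits here gives the
 * guest direct access to that MSR.
 */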
static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr >= msrpm_ranges[i] &&
		    msr < msrpm_ranges[i] + MSRS_IN_RANGE) {
			u32 msr_offset = (i * MSRS_IN_RANGE + msr -
					  msrpm_ranges[i]) * 2;

			u32 *base = msrpm + (msr_offset / 32);
			u32 msr_shift = msr_offset % 32;
			u32 mask = ((write) ? 0 : 2) | ((read) ? 0 : 1);
			*base = (*base & ~(0x3 << msr_shift)) |
				(mask << msr_shift);
			return;
		}
	}
	BUG();
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

#ifdef CONFIG_X86_64
	set_msr_interception(msrpm, MSR_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_FS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_KERNEL_GS_BASE, 1, 1);
	set_msr_interception(msrpm, MSR_LSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_CSTAR, 1, 1);
	set_msr_interception(msrpm, MSR_SYSCALL_MASK, 1, 1);
#endif
	set_msr_interception(msrpm, MSR_K6_STAR, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_CS, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_ESP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_SYSENTER_EIP, 1, 1);
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 1;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.lbr_ctl = 0;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

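/*
 * Module-load time setup: allocate the I/O permission map shared by all
 * guests (everything intercepted except the PC debug port), expose
 * EFER.NX when the host supports it, run the per-CPU init, and use the
 * SVM CPUID feature bits to decide whether nested paging is enabled.
 */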
static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	clear_bit(0x80, iopm_va); /* allow direct access to PC debug port */
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	for_each_online_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	svm_features = cpuid_edx(SVM_CPUID_FUNC);

	if (!svm_has(SVM_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	for_each_online_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

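/*
 * Program the VMCB with the intercepts KVM relies on and with guest state
 * matching the architectural reset state (CS base 0xf0000, RIP 0xfff0 and
 * so on).  With nested paging the CR0/CR3 intercepts, the #PF intercept
 * and the INVLPG/task-switch intercepts are dropped again, since the
 * hardware walks the guest page tables itself.
 */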
static void init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb_control_area *control = &svm->vmcb->control;
	struct vmcb_save_area *save = &svm->vmcb->save;

	control->intercept_cr_read = INTERCEPT_CR0_MASK |
				     INTERCEPT_CR3_MASK |
				     INTERCEPT_CR4_MASK;

	control->intercept_cr_write = INTERCEPT_CR0_MASK |
				      INTERCEPT_CR3_MASK |
				      INTERCEPT_CR4_MASK |
				      INTERCEPT_CR8_MASK;

	control->intercept_dr_read = INTERCEPT_DR0_MASK |
				     INTERCEPT_DR1_MASK |
				     INTERCEPT_DR2_MASK |
				     INTERCEPT_DR3_MASK;

	control->intercept_dr_write = INTERCEPT_DR0_MASK |
				      INTERCEPT_DR1_MASK |
				      INTERCEPT_DR2_MASK |
				      INTERCEPT_DR3_MASK |
				      INTERCEPT_DR5_MASK |
				      INTERCEPT_DR7_MASK;

	control->intercept_exceptions = (1 << PF_VECTOR) |
					(1 << UD_VECTOR) |
					(1 << MC_VECTOR);


	control->intercept = (1ULL << INTERCEPT_INTR) |
			     (1ULL << INTERCEPT_NMI) |
			     (1ULL << INTERCEPT_SMI) |
			     (1ULL << INTERCEPT_CPUID) |
			     (1ULL << INTERCEPT_INVD) |
			     (1ULL << INTERCEPT_HLT) |
			     (1ULL << INTERCEPT_INVLPG) |
			     (1ULL << INTERCEPT_INVLPGA) |
			     (1ULL << INTERCEPT_IOIO_PROT) |
			     (1ULL << INTERCEPT_MSR_PROT) |
			     (1ULL << INTERCEPT_TASK_SWITCH) |
			     (1ULL << INTERCEPT_SHUTDOWN) |
			     (1ULL << INTERCEPT_VMRUN) |
			     (1ULL << INTERCEPT_VMMCALL) |
			     (1ULL << INTERCEPT_VMLOAD) |
			     (1ULL << INTERCEPT_VMSAVE) |
			     (1ULL << INTERCEPT_STGI) |
			     (1ULL << INTERCEPT_CLGI) |
			     (1ULL << INTERCEPT_SKINIT) |
			     (1ULL << INTERCEPT_WBINVD) |
			     (1ULL << INTERCEPT_MONITOR) |
			     (1ULL << INTERCEPT_MWAIT);

	control->iopm_base_pa = iopm_base;
	control->msrpm_base_pa = __pa(svm->msrpm);
	control->tsc_offset = 0;
	control->int_ctl = V_INTR_MASKING_MASK;

	init_seg(&save->es);
	init_seg(&save->ss);
	init_seg(&save->ds);
	init_seg(&save->fs);
	init_seg(&save->gs);

	save->cs.selector = 0xf000;
	/* Executable/Readable Code Segment */
	save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
		SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
	save->cs.limit = 0xffff;
	/*
	 * cs.base should really be 0xffff0000, but vmx can't handle that, so
	 * be consistent with it.
	 *
	 * Replace when we have real mode working for vmx.
	 */
	save->cs.base = 0xf0000;

	save->gdtr.limit = 0xffff;
	save->idtr.limit = 0xffff;

	init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
	init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);

	save->efer = MSR_EFER_SVME_MASK;
	save->dr6 = 0xffff0ff0;
	save->dr7 = 0x400;
	save->rflags = 2;
	save->rip = 0x0000fff0;
	svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;

	/*
	 * cr0 val on cpu init should be 0x60000010, we enable cpu
	 * cache by default. the orderly way is to enable cache in bios.
	 */
	save->cr0 = 0x00000010 | X86_CR0_PG | X86_CR0_WP;
	save->cr4 = X86_CR4_PAE;
	/* rdx = ?? */

	if (npt_enabled) {
		/* Setup VMCB for Nested Paging */
		control->nested_ctl = 1;
		control->intercept &= ~((1ULL << INTERCEPT_TASK_SWITCH) |
					(1ULL << INTERCEPT_INVLPG));
		control->intercept_exceptions &= ~(1 << PF_VECTOR);
		control->intercept_cr_read &= ~(INTERCEPT_CR0_MASK|
						INTERCEPT_CR3_MASK);
		control->intercept_cr_write &= ~(INTERCEPT_CR0_MASK|
						 INTERCEPT_CR3_MASK);
		save->g_pat = 0x0007040600070406ULL;
		/* enable caching because the QEMU Bios doesn't enable it */
		save->cr0 = X86_CR0_ET;
		save->cr3 = 0;
		save->cr4 = 0;
	}
	force_new_asid(&svm->vcpu);
}

static int svm_vcpu_reset(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	init_vmcb(svm);

	if (vcpu->vcpu_id != 0) {
		kvm_rip_write(vcpu, 0);
		svm->vmcb->save.cs.base = svm->vcpu.arch.sipi_vector << 12;
		svm->vmcb->save.cs.selector = svm->vcpu.arch.sipi_vector << 8;
	}
	vcpu->arch.regs_avail = ~0;
	vcpu->arch.regs_dirty = ~0;

	return 0;
}

static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
{
	struct vcpu_svm *svm;
	struct page *page;
	struct page *msrpm_pages;
	int err;

	svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
	if (!svm) {
		err = -ENOMEM;
		goto out;
	}

	err = kvm_vcpu_init(&svm->vcpu, kvm, id);
	if (err)
		goto free_svm;

	page = alloc_page(GFP_KERNEL);
	if (!page) {
		err = -ENOMEM;
		goto uninit;
	}

	err = -ENOMEM;
	msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
	if (!msrpm_pages)
		goto uninit;
	svm->msrpm = page_address(msrpm_pages);
	svm_vcpu_init_msrpm(svm->msrpm);

	svm->vmcb = page_address(page);
	clear_page(svm->vmcb);
	svm->vmcb_pa = page_to_pfn(page) << PAGE_SHIFT;
	svm->asid_generation = 0;
	memset(svm->db_regs, 0, sizeof(svm->db_regs));
	init_vmcb(svm);

	fx_init(&svm->vcpu);
	svm->vcpu.fpu_active = 1;
	svm->vcpu.arch.apic_base = 0xfee00000 | MSR_IA32_APICBASE_ENABLE;
	if (svm->vcpu.vcpu_id == 0)
		svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;

	return &svm->vcpu;

uninit:
	kvm_vcpu_uninit(&svm->vcpu);
free_svm:
	kmem_cache_free(kvm_vcpu_cache, svm);
out:
	return ERR_PTR(err);
}

static void svm_free_vcpu(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	__free_page(pfn_to_page(svm->vmcb_pa >> PAGE_SHIFT));
	__free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
	kvm_vcpu_uninit(vcpu);
	kmem_cache_free(kvm_vcpu_cache, svm);
}

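/*
 * A vcpu migrating to a different physical CPU would otherwise observe a
 * TSC jump, so the VMCB tsc_offset is adjusted to keep the guest TSC
 * monotonic.  The host MSRs the guest may clobber are saved here and
 * restored in svm_vcpu_put().
 */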
static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	if (unlikely(cpu != vcpu->cpu)) {
		u64 tsc_this, delta;

		/*
		 * Make sure that the guest sees a monotonically
		 * increasing TSC.
		 */
		rdtscll(tsc_this);
		delta = vcpu->arch.host_tsc - tsc_this;
		svm->vmcb->control.tsc_offset += delta;
		vcpu->cpu = cpu;
		kvm_migrate_timers(vcpu);
	}

	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
}

static void svm_vcpu_put(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	int i;

	++vcpu->stat.host_state_reload;
	for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
		wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);

	rdtscll(vcpu->arch.host_tsc);
}

static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
{
	return to_svm(vcpu)->vmcb->save.rflags;
}

static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
{
	to_svm(vcpu)->vmcb->save.rflags = rflags;
}

static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	switch (seg) {
	case VCPU_SREG_CS: return &save->cs;
	case VCPU_SREG_DS: return &save->ds;
	case VCPU_SREG_ES: return &save->es;
	case VCPU_SREG_FS: return &save->fs;
	case VCPU_SREG_GS: return &save->gs;
	case VCPU_SREG_SS: return &save->ss;
	case VCPU_SREG_TR: return &save->tr;
	case VCPU_SREG_LDTR: return &save->ldtr;
	}
	BUG();
	return NULL;
}

static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	return s->base;
}

static void svm_get_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	var->base = s->base;
	var->limit = s->limit;
	var->selector = s->selector;
	var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
	var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
	var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
	var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
	var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
	var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
	var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
	var->g = (s->attrib >> SVM_SELECTOR_G_SHIFT) & 1;

	/*
	 * SVM always stores 0 for the 'G' bit in the CS selector in
	 * the VMCB on a VMEXIT. This hurts cross-vendor migration:
	 * Intel's VMENTRY has a check on the 'G' bit.
	 */
	if (seg == VCPU_SREG_CS)
		var->g = s->limit > 0xfffff;

	/*
	 * Work around a bug where the busy flag in the tr selector
	 * isn't exposed
	 */
	if (seg == VCPU_SREG_TR)
		var->type |= 0x2;

	var->unusable = !var->present;
}

static int svm_get_cpl(struct kvm_vcpu *vcpu)
{
	struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;

	return save->cpl;
}

static void svm_get_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.idtr.limit;
	dt->base = svm->vmcb->save.idtr.base;
}

static void svm_set_idt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.idtr.limit = dt->limit;
	svm->vmcb->save.idtr.base = dt->base;
}

static void svm_get_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	dt->limit = svm->vmcb->save.gdtr.limit;
	dt->base = svm->vmcb->save.gdtr.base;
}

static void svm_set_gdt(struct kvm_vcpu *vcpu, struct descriptor_table *dt)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	svm->vmcb->save.gdtr.limit = dt->limit;
	svm->vmcb->save.gdtr.base = dt->base;
}

static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
{
}

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
{
	struct vcpu_svm *svm = to_svm(vcpu);

#ifdef CONFIG_X86_64
	if (vcpu->arch.shadow_efer & EFER_LME) {
		if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer |= EFER_LMA;
			svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
		}

		if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
			vcpu->arch.shadow_efer &= ~EFER_LMA;
			svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
		}
	}
#endif
	if (npt_enabled)
		goto set;

	if ((vcpu->arch.cr0 & X86_CR0_TS) && !(cr0 & X86_CR0_TS)) {
		svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
		vcpu->fpu_active = 1;
	}

	vcpu->arch.cr0 = cr0;
	cr0 |= X86_CR0_PG | X86_CR0_WP;
	if (!vcpu->fpu_active) {
		svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
		cr0 |= X86_CR0_TS;
	}
set:
	/*
	 * re-enable caching here because the QEMU bios
	 * does not do it - this results in some delay at
	 * reboot
	 */
	cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
	svm->vmcb->save.cr0 = cr0;
}

static void svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
{
	unsigned long host_cr4_mce = read_cr4() & X86_CR4_MCE;
	unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;

	if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
		force_new_asid(vcpu);

	vcpu->arch.cr4 = cr4;
	if (!npt_enabled)
		cr4 |= X86_CR4_PAE;
	cr4 |= host_cr4_mce;
	to_svm(vcpu)->vmcb->save.cr4 = cr4;
}

static void svm_set_segment(struct kvm_vcpu *vcpu,
			    struct kvm_segment *var, int seg)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	struct vmcb_seg *s = svm_seg(vcpu, seg);

	s->base = var->base;
	s->limit = var->limit;
	s->selector = var->selector;
	if (var->unusable)
		s->attrib = 0;
	else {
		s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
		s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
		s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
		s->attrib |= (var->present & 1) << SVM_SELECTOR_P_SHIFT;
		s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
		s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
		s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
		s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
	}
	if (seg == VCPU_SREG_CS)
		svm->vmcb->save.cpl
			= (svm->vmcb->save.cs.attrib
			   >> SVM_SELECTOR_DPL_SHIFT) & 3;

}

static int svm_guest_debug(struct kvm_vcpu *vcpu, struct kvm_debug_guest *dbg)
{
	return -EOPNOTSUPP;
}

static int svm_get_irq(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 exit_int_info = svm->vmcb->control.exit_int_info;

	if (is_external_interrupt(exit_int_info))
		return exit_int_info & SVM_EVTINJ_VEC_MASK;
	return -1;
}

static void load_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	wrmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

static void save_host_msrs(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host_gs_base);
#endif
}

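/*
 * Hand out a fresh ASID from the per-CPU pool.  When the pool is exhausted
 * the generation counter is bumped and a full TLB flush is requested,
 * after which ASIDs are reused starting from 1.
 */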
static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *svm_data)
{
	if (svm_data->next_asid > svm_data->max_asid) {
		++svm_data->asid_generation;
		svm_data->next_asid = 1;
		svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
	}

	svm->vcpu.cpu = svm_data->cpu;
	svm->asid_generation = svm_data->asid_generation;
	svm->vmcb->control.asid = svm_data->next_asid++;
}

static unsigned long svm_get_dr(struct kvm_vcpu *vcpu, int dr)
{
	unsigned long val = to_svm(vcpu)->db_regs[dr];
	KVMTRACE_2D(DR_READ, vcpu, (u32)dr, (u32)val, handler);
	return val;
}

static void svm_set_dr(struct kvm_vcpu *vcpu, int dr, unsigned long value,
		       int *exception)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	*exception = 0;

	if (svm->vmcb->save.dr7 & DR7_GD_MASK) {
		svm->vmcb->save.dr7 &= ~DR7_GD_MASK;
		svm->vmcb->save.dr6 |= DR6_BD_MASK;
		*exception = DB_VECTOR;
		return;
	}

	switch (dr) {
	case 0 ... 3:
		svm->db_regs[dr] = value;
		return;
	case 4 ... 5:
		if (vcpu->arch.cr4 & X86_CR4_DE) {
			*exception = UD_VECTOR;
			return;
		}
	case 7: {
		if (value & ~((1ULL << 32) - 1)) {
			*exception = GP_VECTOR;
			return;
		}
		svm->vmcb->save.dr7 = value;
		return;
	}
	default:
		printk(KERN_DEBUG "%s: unexpected dr %u\n",
		       __func__, dr);
		*exception = UD_VECTOR;
		return;
	}
}

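/*
 * #PF intercept (or #NPF when nested paging is enabled): the faulting
 * address and error code arrive in exit_info_2/exit_info_1 and are handed
 * to the common MMU code.
 */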
static int pf_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 exit_int_info = svm->vmcb->control.exit_int_info;
	struct kvm *kvm = svm->vcpu.kvm;
	u64 fault_address;
	u32 error_code;
	bool event_injection = false;

	if (!irqchip_in_kernel(kvm) &&
	    is_external_interrupt(exit_int_info)) {
		event_injection = true;
		push_irq(&svm->vcpu, exit_int_info & SVM_EVTINJ_VEC_MASK);
	}

	fault_address = svm->vmcb->control.exit_info_2;
	error_code = svm->vmcb->control.exit_info_1;

	if (!npt_enabled)
		KVMTRACE_3D(PAGE_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	else
		KVMTRACE_3D(TDP_FAULT, &svm->vcpu, error_code,
			    (u32)fault_address, (u32)(fault_address >> 32),
			    handler);
	/*
	 * FIXME: This shouldn't be necessary here, but there is a flush
	 * missing in the MMU code. Until we find this bug, flush the
	 * complete TLB here on an NPF
	 */
	if (npt_enabled)
		svm_flush_tlb(&svm->vcpu);

	if (!npt_enabled && event_injection)
		kvm_mmu_unprotect_page_virt(&svm->vcpu, fault_address);
	return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code);
}

static int ud_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	int er;

	er = emulate_instruction(&svm->vcpu, kvm_run, 0, 0, EMULTYPE_TRAP_UD);
	if (er != EMULATE_DONE)
		kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int nm_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->vmcb->control.intercept_exceptions &= ~(1 << NM_VECTOR);
	if (!(svm->vcpu.arch.cr0 & X86_CR0_TS))
		svm->vmcb->save.cr0 &= ~X86_CR0_TS;
	svm->vcpu.fpu_active = 1;

	return 1;
}

static int mc_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * On an #MC intercept the MCE handler is not called automatically in
	 * the host. So do it by hand here.
	 */
	asm volatile (
		"int $0x12\n");
	/* not sure if we ever come back to this point */

	return 1;
}

static int shutdown_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	/*
	 * VMCB is undefined after a SHUTDOWN intercept
	 * so reinitialize it.
	 */
	clear_page(svm->vmcb);
	init_vmcb(svm);

	kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
	return 0;
}

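/*
 * IOIO intercept: exit_info_1 encodes the port, access size, direction and
 * rep prefix.  String instructions go through the full emulator; simple
 * port accesses are forwarded to kvm_emulate_pio().
 */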
static int io_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
	int size, down, in, string, rep;
	unsigned port;

	++svm->vcpu.stat.io_exits;

	svm->next_rip = svm->vmcb->control.exit_info_2;

	string = (io_info & SVM_IOIO_STR_MASK) != 0;

	if (string) {
		if (emulate_instruction(&svm->vcpu,
					kvm_run, 0, 0, 0) == EMULATE_DO_MMIO)
			return 0;
		return 1;
	}

	in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	port = io_info >> 16;
	size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	rep = (io_info & SVM_IOIO_REP_MASK) != 0;
	down = (svm->vmcb->save.rflags & X86_EFLAGS_DF) != 0;

	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_pio(&svm->vcpu, kvm_run, in, size, port);
}

static int nmi_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	KVMTRACE_0D(NMI, &svm->vcpu, handler);
	return 1;
}

static int intr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	++svm->vcpu.stat.irq_exits;
	KVMTRACE_0D(INTR, &svm->vcpu, handler);
	return 1;
}

static int nop_on_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	return 1;
}

static int halt_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
	skip_emulated_instruction(&svm->vcpu);
	return kvm_emulate_halt(&svm->vcpu);
}

static int vmmcall_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
	skip_emulated_instruction(&svm->vcpu);
	kvm_emulate_hypercall(&svm->vcpu);
	return 1;
}

static int invalid_op_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	kvm_queue_exception(&svm->vcpu, UD_VECTOR);
	return 1;
}

static int task_switch_interception(struct vcpu_svm *svm,
				    struct kvm_run *kvm_run)
{
	u16 tss_selector;

	tss_selector = (u16)svm->vmcb->control.exit_info_1;
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_IRET);
	if (svm->vmcb->control.exit_info_2 &
	    (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
		return kvm_task_switch(&svm->vcpu, tss_selector,
				       TASK_SWITCH_JMP);
	return kvm_task_switch(&svm->vcpu, tss_selector, TASK_SWITCH_CALL);
}

static int cpuid_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
	kvm_emulate_cpuid(&svm->vcpu);
	return 1;
}

static int invlpg_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, kvm_run, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int emulate_on_interception(struct vcpu_svm *svm,
				   struct kvm_run *kvm_run)
{
	if (emulate_instruction(&svm->vcpu, NULL, 0, 0, 0) != EMULATE_DONE)
		pr_unimpl(&svm->vcpu, "%s: failed\n", __func__);
	return 1;
}

static int cr8_write_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
{
	emulate_instruction(&svm->vcpu, NULL, 0, 0, 0);
	if (irqchip_in_kernel(svm->vcpu.kvm))
		return 1;
	kvm_run->exit_reason = KVM_EXIT_SET_TPR;
	return 0;
}

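/*
 * MSR reads that SVM keeps in the VMCB save area are answered from there;
 * everything else falls through to kvm_get_msr_common().
 */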
Avi Kivity6aa8b732006-12-10 02:21:36 -08001197static int svm_get_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 *data)
1198{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001199 struct vcpu_svm *svm = to_svm(vcpu);
1200
Avi Kivity6aa8b732006-12-10 02:21:36 -08001201 switch (ecx) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001202 case MSR_IA32_TIME_STAMP_COUNTER: {
1203 u64 tsc;
1204
1205 rdtscll(tsc);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001206 *data = svm->vmcb->control.tsc_offset + tsc;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001207 break;
1208 }
Avi Kivity0e859ca2006-12-22 01:05:08 -08001209 case MSR_K6_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001210 *data = svm->vmcb->save.star;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001211 break;
Avi Kivity0e859ca2006-12-22 01:05:08 -08001212#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001213 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001214 *data = svm->vmcb->save.lstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001215 break;
1216 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001217 *data = svm->vmcb->save.cstar;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001218 break;
1219 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001220 *data = svm->vmcb->save.kernel_gs_base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001221 break;
1222 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001223 *data = svm->vmcb->save.sfmask;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001224 break;
1225#endif
1226 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001227 *data = svm->vmcb->save.sysenter_cs;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001228 break;
1229 case MSR_IA32_SYSENTER_EIP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001230 *data = svm->vmcb->save.sysenter_eip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001231 break;
1232 case MSR_IA32_SYSENTER_ESP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001233 *data = svm->vmcb->save.sysenter_esp;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001234 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01001235 /* Nobody will change the following 5 values in the VMCB so
1236 we can safely return them on rdmsr. They will always be 0
1237 until LBRV is implemented. */
1238 case MSR_IA32_DEBUGCTLMSR:
1239 *data = svm->vmcb->save.dbgctl;
1240 break;
1241 case MSR_IA32_LASTBRANCHFROMIP:
1242 *data = svm->vmcb->save.br_from;
1243 break;
1244 case MSR_IA32_LASTBRANCHTOIP:
1245 *data = svm->vmcb->save.br_to;
1246 break;
1247 case MSR_IA32_LASTINTFROMIP:
1248 *data = svm->vmcb->save.last_excp_from;
1249 break;
1250 case MSR_IA32_LASTINTTOIP:
1251 *data = svm->vmcb->save.last_excp_to;
1252 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001253 default:
Avi Kivity3bab1f52006-12-29 16:49:48 -08001254 return kvm_get_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001255 }
1256 return 0;
1257}
1258
Rusty Russelle756fc62007-07-30 20:07:08 +10001259static int rdmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001260{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001261 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Avi Kivity6aa8b732006-12-10 02:21:36 -08001262 u64 data;
1263
Rusty Russelle756fc62007-07-30 20:07:08 +10001264 if (svm_get_msr(&svm->vcpu, ecx, &data))
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02001265 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001266 else {
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001267 KVMTRACE_3D(MSR_READ, &svm->vcpu, ecx, (u32)data,
1268 (u32)(data >> 32), handler);
1269
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001270 svm->vcpu.arch.regs[VCPU_REGS_RAX] = data & 0xffffffff;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001271 svm->vcpu.arch.regs[VCPU_REGS_RDX] = data >> 32;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001272 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10001273 skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001274 }
1275 return 1;
1276}
1277
1278static int svm_set_msr(struct kvm_vcpu *vcpu, unsigned ecx, u64 data)
1279{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001280 struct vcpu_svm *svm = to_svm(vcpu);
1281
Avi Kivity6aa8b732006-12-10 02:21:36 -08001282 switch (ecx) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001283 case MSR_IA32_TIME_STAMP_COUNTER: {
1284 u64 tsc;
1285
1286 rdtscll(tsc);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001287 svm->vmcb->control.tsc_offset = data - tsc;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001288 break;
1289 }
Avi Kivity0e859ca2006-12-22 01:05:08 -08001290 case MSR_K6_STAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001291 svm->vmcb->save.star = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001292 break;
Robert P. J. Day49b14f22007-01-29 13:19:50 -08001293#ifdef CONFIG_X86_64
Avi Kivity6aa8b732006-12-10 02:21:36 -08001294 case MSR_LSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001295 svm->vmcb->save.lstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001296 break;
1297 case MSR_CSTAR:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001298 svm->vmcb->save.cstar = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001299 break;
1300 case MSR_KERNEL_GS_BASE:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001301 svm->vmcb->save.kernel_gs_base = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001302 break;
1303 case MSR_SYSCALL_MASK:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001304 svm->vmcb->save.sfmask = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001305 break;
1306#endif
1307 case MSR_IA32_SYSENTER_CS:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001308 svm->vmcb->save.sysenter_cs = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001309 break;
1310 case MSR_IA32_SYSENTER_EIP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001311 svm->vmcb->save.sysenter_eip = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001312 break;
1313 case MSR_IA32_SYSENTER_ESP:
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001314 svm->vmcb->save.sysenter_esp = data;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001315 break;
Joerg Roedela2938c82008-02-13 16:30:28 +01001316 case MSR_IA32_DEBUGCTLMSR:
Joerg Roedel24e09cb2008-02-13 18:58:47 +01001317 if (!svm_has(SVM_FEATURE_LBRV)) {
1318 pr_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001319 __func__, data);
Joerg Roedel24e09cb2008-02-13 18:58:47 +01001320 break;
1321 }
1322 if (data & DEBUGCTL_RESERVED_BITS)
1323 return 1;
1324
1325 svm->vmcb->save.dbgctl = data;
1326 if (data & (1ULL<<0))
1327 svm_enable_lbrv(svm);
1328 else
1329 svm_disable_lbrv(svm);
Joerg Roedela2938c82008-02-13 16:30:28 +01001330 break;
Joerg Roedel62b9aba2007-12-11 15:36:57 +01001331 case MSR_K7_EVNTSEL0:
1332 case MSR_K7_EVNTSEL1:
1333 case MSR_K7_EVNTSEL2:
1334 case MSR_K7_EVNTSEL3:
Chris Lalancette14ae51b2008-05-05 13:05:16 -04001335 case MSR_K7_PERFCTR0:
1336 case MSR_K7_PERFCTR1:
1337 case MSR_K7_PERFCTR2:
1338 case MSR_K7_PERFCTR3:
Joerg Roedel62b9aba2007-12-11 15:36:57 +01001339 /*
Chris Lalancette14ae51b2008-05-05 13:05:16 -04001340 * Just discard all writes to the performance counters; this
1341	 * should keep both older Linux and Windows 64-bit guests
1342 * happy
Joerg Roedel62b9aba2007-12-11 15:36:57 +01001343 */
Chris Lalancette14ae51b2008-05-05 13:05:16 -04001344 pr_unimpl(vcpu, "unimplemented perfctr wrmsr: 0x%x data 0x%llx\n", ecx, data);
1345
Joerg Roedel62b9aba2007-12-11 15:36:57 +01001346 break;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001347 default:
Avi Kivity3bab1f52006-12-29 16:49:48 -08001348 return kvm_set_msr_common(vcpu, ecx, data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001349 }
1350 return 0;
1351}
1352
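/*
 * WRMSR intercept: the 64-bit value is reassembled from the guest's
 * EDX:EAX pair before being handed to svm_set_msr().
 */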
Rusty Russelle756fc62007-07-30 20:07:08 +10001353static int wrmsr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001354{
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001355 u32 ecx = svm->vcpu.arch.regs[VCPU_REGS_RCX];
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001356 u64 data = (svm->vcpu.arch.regs[VCPU_REGS_RAX] & -1u)
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001357 | ((u64)(svm->vcpu.arch.regs[VCPU_REGS_RDX] & -1u) << 32);
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001358
1359 KVMTRACE_3D(MSR_WRITE, &svm->vcpu, ecx, (u32)data, (u32)(data >> 32),
1360 handler);
1361
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001362 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Rusty Russelle756fc62007-07-30 20:07:08 +10001363 if (svm_set_msr(&svm->vcpu, ecx, data))
Avi Kivityc1a5d4f2007-11-25 14:12:03 +02001364 kvm_inject_gp(&svm->vcpu, 0);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001365 else
Rusty Russelle756fc62007-07-30 20:07:08 +10001366 skip_emulated_instruction(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001367 return 1;
1368}
1369
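/*
 * exit_info_1 distinguishes the two MSR intercepts: 1 for WRMSR, 0 for
 * RDMSR.
 */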
Rusty Russelle756fc62007-07-30 20:07:08 +10001370static int msr_interception(struct vcpu_svm *svm, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001371{
Rusty Russelle756fc62007-07-30 20:07:08 +10001372 if (svm->vmcb->control.exit_info_1)
1373 return wrmsr_interception(svm, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001374 else
Rusty Russelle756fc62007-07-30 20:07:08 +10001375 return rdmsr_interception(svm, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001376}
1377
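/*
 * VINTR exit: the guest has opened its interrupt window.  Drop the VINTR
 * intercept and the dummy V_IRQ, and return to user space if it asked to
 * inject interrupts itself.
 */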
Rusty Russelle756fc62007-07-30 20:07:08 +10001378static int interrupt_window_interception(struct vcpu_svm *svm,
Dor Laorc1150d82007-01-05 16:36:24 -08001379 struct kvm_run *kvm_run)
1380{
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001381 KVMTRACE_0D(PEND_INTR, &svm->vcpu, handler);
1382
Eddie Dong85f455f2007-07-06 12:20:49 +03001383 svm->vmcb->control.intercept &= ~(1ULL << INTERCEPT_VINTR);
1384 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
Dor Laorc1150d82007-01-05 16:36:24 -08001385 /*
1386	 * If user space is waiting to inject interrupts, exit to it as
1387	 * soon as possible
1388 */
1389 if (kvm_run->request_interrupt_window &&
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001390 !svm->vcpu.arch.irq_summary) {
Rusty Russelle756fc62007-07-30 20:07:08 +10001391 ++svm->vcpu.stat.irq_window_exits;
Dor Laorc1150d82007-01-05 16:36:24 -08001392 kvm_run->exit_reason = KVM_EXIT_IRQ_WINDOW_OPEN;
1393 return 0;
1394 }
1395
1396 return 1;
1397}
1398
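/*
 * Dispatch table indexed by the VMCB exit code.  Exit codes without an
 * entry here make handle_exit() report KVM_EXIT_UNKNOWN to user space.
 */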
Rusty Russelle756fc62007-07-30 20:07:08 +10001399static int (*svm_exit_handlers[])(struct vcpu_svm *svm,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001400 struct kvm_run *kvm_run) = {
1401 [SVM_EXIT_READ_CR0] = emulate_on_interception,
1402 [SVM_EXIT_READ_CR3] = emulate_on_interception,
1403 [SVM_EXIT_READ_CR4] = emulate_on_interception,
Avi Kivity80a81192007-12-06 19:50:00 +02001404 [SVM_EXIT_READ_CR8] = emulate_on_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001405 /* for now: */
1406 [SVM_EXIT_WRITE_CR0] = emulate_on_interception,
1407 [SVM_EXIT_WRITE_CR3] = emulate_on_interception,
1408 [SVM_EXIT_WRITE_CR4] = emulate_on_interception,
Joerg Roedel1d075432007-12-06 21:02:25 +01001409 [SVM_EXIT_WRITE_CR8] = cr8_write_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001410 [SVM_EXIT_READ_DR0] = emulate_on_interception,
1411 [SVM_EXIT_READ_DR1] = emulate_on_interception,
1412 [SVM_EXIT_READ_DR2] = emulate_on_interception,
1413 [SVM_EXIT_READ_DR3] = emulate_on_interception,
1414 [SVM_EXIT_WRITE_DR0] = emulate_on_interception,
1415 [SVM_EXIT_WRITE_DR1] = emulate_on_interception,
1416 [SVM_EXIT_WRITE_DR2] = emulate_on_interception,
1417 [SVM_EXIT_WRITE_DR3] = emulate_on_interception,
1418 [SVM_EXIT_WRITE_DR5] = emulate_on_interception,
1419 [SVM_EXIT_WRITE_DR7] = emulate_on_interception,
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05001420 [SVM_EXIT_EXCP_BASE + UD_VECTOR] = ud_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001421 [SVM_EXIT_EXCP_BASE + PF_VECTOR] = pf_interception,
Anthony Liguori7807fa62007-04-23 09:17:21 -05001422 [SVM_EXIT_EXCP_BASE + NM_VECTOR] = nm_interception,
Joerg Roedel53371b52008-04-09 14:15:30 +02001423 [SVM_EXIT_EXCP_BASE + MC_VECTOR] = mc_interception,
Joerg Roedela0698052008-04-30 17:56:01 +02001424 [SVM_EXIT_INTR] = intr_interception,
Joerg Roedelc47f0982008-04-30 17:56:00 +02001425 [SVM_EXIT_NMI] = nmi_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001426 [SVM_EXIT_SMI] = nop_on_interception,
1427 [SVM_EXIT_INIT] = nop_on_interception,
Dor Laorc1150d82007-01-05 16:36:24 -08001428 [SVM_EXIT_VINTR] = interrupt_window_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001429 /* [SVM_EXIT_CR0_SEL_WRITE] = emulate_on_interception, */
1430 [SVM_EXIT_CPUID] = cpuid_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02001431 [SVM_EXIT_INVD] = emulate_on_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001432 [SVM_EXIT_HLT] = halt_interception,
Marcelo Tosattia7052892008-09-23 13:18:35 -03001433 [SVM_EXIT_INVLPG] = invlpg_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001434 [SVM_EXIT_INVLPGA] = invalid_op_interception,
1435 [SVM_EXIT_IOIO] = io_interception,
1436 [SVM_EXIT_MSR] = msr_interception,
1437 [SVM_EXIT_TASK_SWITCH] = task_switch_interception,
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08001438 [SVM_EXIT_SHUTDOWN] = shutdown_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001439 [SVM_EXIT_VMRUN] = invalid_op_interception,
Avi Kivity02e235b2007-02-19 14:37:47 +02001440 [SVM_EXIT_VMMCALL] = vmmcall_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001441 [SVM_EXIT_VMLOAD] = invalid_op_interception,
1442 [SVM_EXIT_VMSAVE] = invalid_op_interception,
1443 [SVM_EXIT_STGI] = invalid_op_interception,
1444 [SVM_EXIT_CLGI] = invalid_op_interception,
1445 [SVM_EXIT_SKINIT] = invalid_op_interception,
Avi Kivitycf5a94d2007-10-28 16:11:58 +02001446 [SVM_EXIT_WBINVD] = emulate_on_interception,
Joerg Roedel916ce232007-03-21 19:47:00 +01001447 [SVM_EXIT_MONITOR] = invalid_op_interception,
1448 [SVM_EXIT_MWAIT] = invalid_op_interception,
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001449 [SVM_EXIT_NPF] = pf_interception,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001450};
1451
Avi Kivity04d2cc72007-09-10 18:10:54 +03001452static int handle_exit(struct kvm_run *kvm_run, struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001453{
Avi Kivity04d2cc72007-09-10 18:10:54 +03001454 struct vcpu_svm *svm = to_svm(vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001455 u32 exit_code = svm->vmcb->control.exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001456
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001457 KVMTRACE_3D(VMEXIT, vcpu, exit_code, (u32)svm->vmcb->save.rip,
1458 (u32)((u64)svm->vmcb->save.rip >> 32), entryexit);
1459
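	/*
	 * With nested paging the guest changes CR0/CR3 without exiting, so
	 * the cached copies in vcpu->arch must be refreshed from the VMCB,
	 * the PDPTRs revalidated for PAE guests, and the MMU reloaded if
	 * paging was switched on or off.
	 */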
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001460 if (npt_enabled) {
1461 int mmu_reload = 0;
1462 if ((vcpu->arch.cr0 ^ svm->vmcb->save.cr0) & X86_CR0_PG) {
1463 svm_set_cr0(vcpu, svm->vmcb->save.cr0);
1464 mmu_reload = 1;
1465 }
1466 vcpu->arch.cr0 = svm->vmcb->save.cr0;
1467 vcpu->arch.cr3 = svm->vmcb->save.cr3;
1468 if (is_paging(vcpu) && is_pae(vcpu) && !is_long_mode(vcpu)) {
1469 if (!load_pdptrs(vcpu, vcpu->arch.cr3)) {
1470 kvm_inject_gp(vcpu, 0);
1471 return 1;
1472 }
1473 }
1474 if (mmu_reload) {
1475 kvm_mmu_reset_context(vcpu);
1476 kvm_mmu_load(vcpu);
1477 }
1478 }
1479
Avi Kivity04d2cc72007-09-10 18:10:54 +03001480 kvm_reput_irq(svm);
1481
1482 if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
1483 kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
1484 kvm_run->fail_entry.hardware_entry_failure_reason
1485 = svm->vmcb->control.exit_code;
1486 return 0;
1487 }
1488
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001489 if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001490 exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
1491 exit_code != SVM_EXIT_NPF)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001492		printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
1493 "exit_code 0x%x\n",
Harvey Harrisonb8688d52008-03-03 12:59:56 -08001494 __func__, svm->vmcb->control.exit_int_info,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001495 exit_code);
1496
Ahmed S. Darwish9d8f5492007-02-19 14:37:46 +02001497 if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
Joe Perches56919c52007-11-12 20:06:51 -08001498 || !svm_exit_handlers[exit_code]) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001499 kvm_run->exit_reason = KVM_EXIT_UNKNOWN;
Avi Kivity364b6252007-04-16 14:28:40 +03001500 kvm_run->hw.hardware_exit_reason = exit_code;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001501 return 0;
1502 }
1503
Rusty Russelle756fc62007-07-30 20:07:08 +10001504 return svm_exit_handlers[exit_code](svm, kvm_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001505}
1506
1507static void reload_tss(struct kvm_vcpu *vcpu)
1508{
1509 int cpu = raw_smp_processor_id();
1510
1511 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
Mike Dayd77c26f2007-10-08 09:02:08 -04001512 svm_data->tss_desc->type = 9; /* available 32/64-bit TSS */
Avi Kivity6aa8b732006-12-10 02:21:36 -08001513 load_TR_desc();
1514}
1515
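/*
 * Allocate a fresh ASID whenever the vcpu has migrated to another physical
 * CPU or the per-CPU ASID generation has changed; this is what flushes
 * stale guest TLB entries.
 */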
Rusty Russelle756fc62007-07-30 20:07:08 +10001516static void pre_svm_run(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001517{
1518 int cpu = raw_smp_processor_id();
1519
1520 struct svm_cpu_data *svm_data = per_cpu(svm_data, cpu);
1521
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001522 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
Rusty Russelle756fc62007-07-30 20:07:08 +10001523 if (svm->vcpu.cpu != cpu ||
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001524 svm->asid_generation != svm_data->asid_generation)
Rusty Russelle756fc62007-07-30 20:07:08 +10001525 new_asid(svm, svm_data);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001526}
1527
1528
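/*
 * Inject an interrupt through the VMCB's virtual interrupt mechanism:
 * program int_vector and set V_IRQ with a fixed priority of 0xf.
 */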
Eddie Dong85f455f2007-07-06 12:20:49 +03001529static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001530{
1531 struct vmcb_control_area *control;
1532
Joerg Roedelaf9ca2d2008-04-30 17:56:03 +02001533 KVMTRACE_1D(INJ_VIRQ, &svm->vcpu, (u32)irq, handler);
1534
Avi Kivityfa89a812008-09-01 15:57:51 +03001535 ++svm->vcpu.stat.irq_injections;
Rusty Russelle756fc62007-07-30 20:07:08 +10001536 control = &svm->vmcb->control;
Eddie Dong85f455f2007-07-06 12:20:49 +03001537 control->int_vector = irq;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001538 control->int_ctl &= ~V_INTR_PRIO_MASK;
1539 control->int_ctl |= V_IRQ_MASK |
1540 ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
1541}
1542
Eddie Dong2a8067f2007-08-06 16:29:07 +03001543static void svm_set_irq(struct kvm_vcpu *vcpu, int irq)
1544{
1545 struct vcpu_svm *svm = to_svm(vcpu);
1546
1547 svm_inject_irq(svm, irq);
1548}
1549
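/*
 * Re-arm the CR8 write intercept only when the guest's current TPR would
 * mask the highest pending interrupt; otherwise let the guest write CR8
 * freely to avoid needless exits.
 */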
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001550static void update_cr8_intercept(struct kvm_vcpu *vcpu)
1551{
1552 struct vcpu_svm *svm = to_svm(vcpu);
1553 struct vmcb *vmcb = svm->vmcb;
1554 int max_irr, tpr;
1555
1556 if (!irqchip_in_kernel(vcpu->kvm) || vcpu->arch.apic->vapic_addr)
1557 return;
1558
1559 vmcb->control.intercept_cr_write &= ~INTERCEPT_CR8_MASK;
1560
1561 max_irr = kvm_lapic_find_highest_irr(vcpu);
1562 if (max_irr == -1)
1563 return;
1564
1565 tpr = kvm_lapic_get_cr8(vcpu) << 4;
1566
1567 if (tpr >= (max_irr & 0xf0))
1568 vmcb->control.intercept_cr_write |= INTERCEPT_CR8_MASK;
1569}
1570
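/*
 * Interrupt injection before VMRUN: first re-inject an event the CPU
 * latched in exit_int_info, then deliver a pending interrupt from the
 * in-kernel irqchip if the guest can accept it; otherwise request a VINTR
 * exit so injection can be retried once the window opens.
 */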
Avi Kivity04d2cc72007-09-10 18:10:54 +03001571static void svm_intr_assist(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001572{
Avi Kivity04d2cc72007-09-10 18:10:54 +03001573 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03001574 struct vmcb *vmcb = svm->vmcb;
1575 int intr_vector = -1;
1576
1577 if ((vmcb->control.exit_int_info & SVM_EVTINJ_VALID) &&
1578 ((vmcb->control.exit_int_info & SVM_EVTINJ_TYPE_MASK) == 0)) {
1579 intr_vector = vmcb->control.exit_int_info &
1580 SVM_EVTINJ_VEC_MASK;
1581 vmcb->control.exit_int_info = 0;
1582 svm_inject_irq(svm, intr_vector);
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001583 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001584 }
1585
1586 if (vmcb->control.int_ctl & V_IRQ_MASK)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001587 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001588
Eddie Dong1b9778d2007-09-03 16:56:58 +03001589 if (!kvm_cpu_has_interrupt(vcpu))
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001590 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001591
1592 if (!(vmcb->save.rflags & X86_EFLAGS_IF) ||
1593 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) ||
1594 (vmcb->control.event_inj & SVM_EVTINJ_VALID)) {
1595 /* unable to deliver irq, set pending irq */
1596 vmcb->control.intercept |= (1ULL << INTERCEPT_VINTR);
1597 svm_inject_irq(svm, 0x0);
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001598 goto out;
Eddie Dong85f455f2007-07-06 12:20:49 +03001599 }
1600 /* Okay, we can deliver the interrupt: grab it and update PIC state. */
Eddie Dong1b9778d2007-09-03 16:56:58 +03001601 intr_vector = kvm_cpu_get_interrupt(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03001602 svm_inject_irq(svm, intr_vector);
Eddie Dong1b9778d2007-09-03 16:56:58 +03001603 kvm_timer_intr_post(vcpu, intr_vector);
Joerg Roedelaaacfc92008-04-16 16:51:18 +02001604out:
1605 update_cr8_intercept(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03001606}
1607
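/*
 * An interrupt that was injected via V_IRQ but not delivered before the
 * exit is pushed back to the user space irqchip, and the interrupt window
 * state is recomputed from the interrupt shadow.
 */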
1608static void kvm_reput_irq(struct vcpu_svm *svm)
1609{
Rusty Russelle756fc62007-07-30 20:07:08 +10001610 struct vmcb_control_area *control = &svm->vmcb->control;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001611
Eddie Dong7017fc32007-07-18 11:34:57 +03001612 if ((control->int_ctl & V_IRQ_MASK)
1613 && !irqchip_in_kernel(svm->vcpu.kvm)) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001614 control->int_ctl &= ~V_IRQ_MASK;
Rusty Russelle756fc62007-07-30 20:07:08 +10001615 push_irq(&svm->vcpu, control->int_vector);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001616 }
Dor Laorc1150d82007-01-05 16:36:24 -08001617
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001618 svm->vcpu.arch.interrupt_window_open =
Dor Laorc1150d82007-01-05 16:36:24 -08001619 !(control->int_state & SVM_INTERRUPT_SHADOW_MASK);
1620}
1621
Eddie Dong85f455f2007-07-06 12:20:49 +03001622static void svm_do_inject_vector(struct vcpu_svm *svm)
1623{
1624 struct kvm_vcpu *vcpu = &svm->vcpu;
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001625 int word_index = __ffs(vcpu->arch.irq_summary);
1626 int bit_index = __ffs(vcpu->arch.irq_pending[word_index]);
Eddie Dong85f455f2007-07-06 12:20:49 +03001627 int irq = word_index * BITS_PER_LONG + bit_index;
1628
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001629 clear_bit(bit_index, &vcpu->arch.irq_pending[word_index]);
1630 if (!vcpu->arch.irq_pending[word_index])
1631 clear_bit(word_index, &vcpu->arch.irq_summary);
Eddie Dong85f455f2007-07-06 12:20:49 +03001632 svm_inject_irq(svm, irq);
1633}
1634
Avi Kivity04d2cc72007-09-10 18:10:54 +03001635static void do_interrupt_requests(struct kvm_vcpu *vcpu,
Dor Laorc1150d82007-01-05 16:36:24 -08001636 struct kvm_run *kvm_run)
1637{
Avi Kivity04d2cc72007-09-10 18:10:54 +03001638 struct vcpu_svm *svm = to_svm(vcpu);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001639 struct vmcb_control_area *control = &svm->vmcb->control;
Dor Laorc1150d82007-01-05 16:36:24 -08001640
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001641 svm->vcpu.arch.interrupt_window_open =
Dor Laorc1150d82007-01-05 16:36:24 -08001642 (!(control->int_state & SVM_INTERRUPT_SHADOW_MASK) &&
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001643 (svm->vmcb->save.rflags & X86_EFLAGS_IF));
Dor Laorc1150d82007-01-05 16:36:24 -08001644
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001645 if (svm->vcpu.arch.interrupt_window_open && svm->vcpu.arch.irq_summary)
Dor Laorc1150d82007-01-05 16:36:24 -08001646 /*
1647		 * Interrupts are enabled and not blocked by sti or mov ss; inject now.
1648 */
Eddie Dong85f455f2007-07-06 12:20:49 +03001649 svm_do_inject_vector(svm);
Dor Laorc1150d82007-01-05 16:36:24 -08001650
1651 /*
1652 * Interrupts blocked. Wait for unblock.
1653 */
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001654 if (!svm->vcpu.arch.interrupt_window_open &&
1655 (svm->vcpu.arch.irq_summary || kvm_run->request_interrupt_window))
Dor Laorc1150d82007-01-05 16:36:24 -08001656 control->intercept |= 1ULL << INTERCEPT_VINTR;
Mike Dayd77c26f2007-10-08 09:02:08 -04001657 else
Dor Laorc1150d82007-01-05 16:36:24 -08001658 control->intercept &= ~(1ULL << INTERCEPT_VINTR);
1659}
1660
Izik Eiduscbc94022007-10-25 00:29:55 +02001661static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
1662{
1663 return 0;
1664}
1665
Avi Kivity6aa8b732006-12-10 02:21:36 -08001666static void save_db_regs(unsigned long *db_regs)
1667{
Avi Kivity5aff4582006-12-13 00:33:45 -08001668 asm volatile ("mov %%dr0, %0" : "=r"(db_regs[0]));
1669 asm volatile ("mov %%dr1, %0" : "=r"(db_regs[1]));
1670 asm volatile ("mov %%dr2, %0" : "=r"(db_regs[2]));
1671 asm volatile ("mov %%dr3, %0" : "=r"(db_regs[3]));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001672}
1673
1674static void load_db_regs(unsigned long *db_regs)
1675{
Avi Kivity5aff4582006-12-13 00:33:45 -08001676 asm volatile ("mov %0, %%dr0" : : "r"(db_regs[0]));
1677 asm volatile ("mov %0, %%dr1" : : "r"(db_regs[1]));
1678 asm volatile ("mov %0, %%dr2" : : "r"(db_regs[2]));
1679 asm volatile ("mov %0, %%dr3" : : "r"(db_regs[3]));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001680}
1681
Avi Kivityd9e368d2007-06-07 19:18:30 +03001682static void svm_flush_tlb(struct kvm_vcpu *vcpu)
1683{
1684 force_new_asid(vcpu);
1685}
1686
Avi Kivity04d2cc72007-09-10 18:10:54 +03001687static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
1688{
1689}
1690
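/*
 * The next two helpers keep the local APIC TPR and the VMCB V_TPR field
 * in sync across each VMRUN.
 */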
Joerg Roedeld7bf8222008-04-16 16:51:17 +02001691static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
1692{
1693 struct vcpu_svm *svm = to_svm(vcpu);
1694
1695 if (!(svm->vmcb->control.intercept_cr_write & INTERCEPT_CR8_MASK)) {
1696 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
1697 kvm_lapic_set_tpr(vcpu, cr8);
1698 }
1699}
1700
Joerg Roedel649d6862008-04-16 16:51:15 +02001701static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
1702{
1703 struct vcpu_svm *svm = to_svm(vcpu);
1704 u64 cr8;
1705
1706 if (!irqchip_in_kernel(vcpu->kvm))
1707 return;
1708
1709 cr8 = kvm_get_cr8(vcpu);
1710 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
1711 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
1712}
1713
Avi Kivity80e31d42008-07-14 14:44:59 +03001714#ifdef CONFIG_X86_64
1715#define R "r"
1716#else
1717#define R "e"
1718#endif
1719
Avi Kivity04d2cc72007-09-10 18:10:54 +03001720static void svm_vcpu_run(struct kvm_vcpu *vcpu, struct kvm_run *kvm_run)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001721{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001722 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001723 u16 fs_selector;
1724 u16 gs_selector;
1725 u16 ldt_selector;
Avi Kivityd9e368d2007-06-07 19:18:30 +03001726
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001727 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
1728 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
1729 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
1730
Rusty Russelle756fc62007-07-30 20:07:08 +10001731 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001732
Joerg Roedel649d6862008-04-16 16:51:15 +02001733 sync_lapic_to_cr8(vcpu);
1734
Avi Kivity6aa8b732006-12-10 02:21:36 -08001735 save_host_msrs(vcpu);
Avi Kivityd6e88ae2008-07-10 16:53:33 +03001736 fs_selector = kvm_read_fs();
1737 gs_selector = kvm_read_gs();
1738 ldt_selector = kvm_read_ldt();
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001739 svm->host_cr2 = kvm_read_cr2();
1740 svm->host_dr6 = read_dr6();
1741 svm->host_dr7 = read_dr7();
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001742 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001743 /* required for live migration with NPT */
1744 if (npt_enabled)
1745 svm->vmcb->save.cr3 = vcpu->arch.cr3;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001746
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001747 if (svm->vmcb->save.dr7 & 0xff) {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001748 write_dr7(0);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001749 save_db_regs(svm->host_db_regs);
1750 load_db_regs(svm->db_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001751 }
Avi Kivity36241b82006-12-22 01:05:20 -08001752
Avi Kivity04d2cc72007-09-10 18:10:54 +03001753 clgi();
1754
1755 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08001756
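	/*
	 * The world switch: load the guest's general purpose registers from
	 * vcpu->arch.regs, point RAX at the VMCB physical address, then
	 * VMLOAD/VMRUN/VMSAVE and store the guest registers back.  RAX, RSP
	 * and RIP travel through the VMCB save area instead.
	 */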
Avi Kivity6aa8b732006-12-10 02:21:36 -08001757 asm volatile (
Avi Kivity80e31d42008-07-14 14:44:59 +03001758 "push %%"R"bp; \n\t"
1759 "mov %c[rbx](%[svm]), %%"R"bx \n\t"
1760 "mov %c[rcx](%[svm]), %%"R"cx \n\t"
1761 "mov %c[rdx](%[svm]), %%"R"dx \n\t"
1762 "mov %c[rsi](%[svm]), %%"R"si \n\t"
1763 "mov %c[rdi](%[svm]), %%"R"di \n\t"
1764 "mov %c[rbp](%[svm]), %%"R"bp \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001765#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001766 "mov %c[r8](%[svm]), %%r8 \n\t"
1767 "mov %c[r9](%[svm]), %%r9 \n\t"
1768 "mov %c[r10](%[svm]), %%r10 \n\t"
1769 "mov %c[r11](%[svm]), %%r11 \n\t"
1770 "mov %c[r12](%[svm]), %%r12 \n\t"
1771 "mov %c[r13](%[svm]), %%r13 \n\t"
1772 "mov %c[r14](%[svm]), %%r14 \n\t"
1773 "mov %c[r15](%[svm]), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001774#endif
1775
Avi Kivity6aa8b732006-12-10 02:21:36 -08001776 /* Enter guest mode */
Avi Kivity80e31d42008-07-14 14:44:59 +03001777 "push %%"R"ax \n\t"
1778 "mov %c[vmcb](%[svm]), %%"R"ax \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +03001779 __ex(SVM_VMLOAD) "\n\t"
1780 __ex(SVM_VMRUN) "\n\t"
1781 __ex(SVM_VMSAVE) "\n\t"
Avi Kivity80e31d42008-07-14 14:44:59 +03001782 "pop %%"R"ax \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001783
1784 /* Save guest registers, load host registers */
Avi Kivity80e31d42008-07-14 14:44:59 +03001785 "mov %%"R"bx, %c[rbx](%[svm]) \n\t"
1786 "mov %%"R"cx, %c[rcx](%[svm]) \n\t"
1787 "mov %%"R"dx, %c[rdx](%[svm]) \n\t"
1788 "mov %%"R"si, %c[rsi](%[svm]) \n\t"
1789 "mov %%"R"di, %c[rdi](%[svm]) \n\t"
1790 "mov %%"R"bp, %c[rbp](%[svm]) \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001791#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001792 "mov %%r8, %c[r8](%[svm]) \n\t"
1793 "mov %%r9, %c[r9](%[svm]) \n\t"
1794 "mov %%r10, %c[r10](%[svm]) \n\t"
1795 "mov %%r11, %c[r11](%[svm]) \n\t"
1796 "mov %%r12, %c[r12](%[svm]) \n\t"
1797 "mov %%r13, %c[r13](%[svm]) \n\t"
1798 "mov %%r14, %c[r14](%[svm]) \n\t"
1799 "mov %%r15, %c[r15](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001800#endif
Avi Kivity80e31d42008-07-14 14:44:59 +03001801 "pop %%"R"bp"
Avi Kivity6aa8b732006-12-10 02:21:36 -08001802 :
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001803 : [svm]"a"(svm),
Avi Kivity6aa8b732006-12-10 02:21:36 -08001804 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001805 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
1806 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
1807 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
1808 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
1809 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
1810 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
Avi Kivity05b3e0c2006-12-13 00:33:45 -08001811#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001812 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
1813 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
1814 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
1815 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
1816 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
1817 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
1818 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
1819 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
Avi Kivity6aa8b732006-12-10 02:21:36 -08001820#endif
Laurent Vivier54a08c02007-10-25 14:18:53 +02001821 : "cc", "memory"
Avi Kivity80e31d42008-07-14 14:44:59 +03001822 , R"bx", R"cx", R"dx", R"si", R"di"
Laurent Vivier54a08c02007-10-25 14:18:53 +02001823#ifdef CONFIG_X86_64
Laurent Vivier54a08c02007-10-25 14:18:53 +02001824 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
1825#endif
1826 );
Avi Kivity6aa8b732006-12-10 02:21:36 -08001827
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001828 if ((svm->vmcb->save.dr7 & 0xff))
1829 load_db_regs(svm->host_db_regs);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001830
Zhang Xiantaoad312c72007-12-13 23:50:52 +08001831 vcpu->arch.cr2 = svm->vmcb->save.cr2;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001832 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
1833 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
1834 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001835
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001836 write_dr6(svm->host_dr6);
1837 write_dr7(svm->host_dr7);
1838 kvm_write_cr2(svm->host_cr2);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001839
Avi Kivityd6e88ae2008-07-10 16:53:33 +03001840 kvm_load_fs(fs_selector);
1841 kvm_load_gs(gs_selector);
1842 kvm_load_ldt(ldt_selector);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001843 load_host_msrs(vcpu);
1844
1845 reload_tss(vcpu);
1846
Avi Kivity56ba47d2007-11-07 17:14:18 +02001847 local_irq_disable();
1848
1849 stgi();
1850
Joerg Roedeld7bf8222008-04-16 16:51:17 +02001851 sync_cr8_to_lapic(vcpu);
1852
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001853 svm->next_rip = 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001854}
1855
Avi Kivity80e31d42008-07-14 14:44:59 +03001856#undef R
1857
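/*
 * With nested paging the new root goes into nested_cr3 and the guest's own
 * CR3 is left untouched; with shadow paging it becomes the guest's
 * effective CR3 in the VMCB.  Either way a new ASID flushes the guest TLB.
 */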
Avi Kivity6aa8b732006-12-10 02:21:36 -08001858static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
1859{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001860 struct vcpu_svm *svm = to_svm(vcpu);
1861
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001862 if (npt_enabled) {
1863 svm->vmcb->control.nested_cr3 = root;
1864 force_new_asid(vcpu);
1865 return;
1866 }
1867
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001868 svm->vmcb->save.cr3 = root;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001869 force_new_asid(vcpu);
Anthony Liguori7807fa62007-04-23 09:17:21 -05001870
1871 if (vcpu->fpu_active) {
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001872 svm->vmcb->control.intercept_exceptions |= (1 << NM_VECTOR);
1873 svm->vmcb->save.cr0 |= X86_CR0_TS;
Anthony Liguori7807fa62007-04-23 09:17:21 -05001874 vcpu->fpu_active = 0;
1875 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08001876}
1877
Avi Kivity6aa8b732006-12-10 02:21:36 -08001878static int is_disabled(void)
1879{
Joerg Roedel6031a612007-06-22 12:29:50 +03001880 u64 vm_cr;
1881
1882 rdmsrl(MSR_VM_CR, vm_cr);
1883 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
1884 return 1;
1885
Avi Kivity6aa8b732006-12-10 02:21:36 -08001886 return 0;
1887}
1888
Ingo Molnar102d8322007-02-19 14:37:47 +02001889static void
1890svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
1891{
1892 /*
1893 * Patch in the VMMCALL instruction:
1894 */
1895 hypercall[0] = 0x0f;
1896 hypercall[1] = 0x01;
1897 hypercall[2] = 0xd9;
Ingo Molnar102d8322007-02-19 14:37:47 +02001898}
1899
Yang, Sheng002c7f72007-07-31 14:23:01 +03001900static void svm_check_processor_compat(void *rtn)
1901{
1902 *(int *)rtn = 0;
1903}
1904
Avi Kivity774ead32007-12-26 13:57:04 +02001905static bool svm_cpu_has_accelerated_tpr(void)
1906{
1907 return false;
1908}
1909
Sheng Yang67253af2008-04-25 10:20:22 +08001910static int get_npt_level(void)
1911{
1912#ifdef CONFIG_X86_64
1913 return PT64_ROOT_LEVEL;
1914#else
1915 return PT32E_ROOT_LEVEL;
1916#endif
1917}
1918
Sheng Yang64d4d522008-10-09 16:01:57 +08001919static int svm_get_mt_mask_shift(void)
1920{
1921 return 0;
1922}
1923
Christian Ehrhardtcbdd1be2007-09-09 15:41:59 +03001924static struct kvm_x86_ops svm_x86_ops = {
Avi Kivity6aa8b732006-12-10 02:21:36 -08001925 .cpu_has_kvm_support = has_svm,
1926 .disabled_by_bios = is_disabled,
1927 .hardware_setup = svm_hardware_setup,
1928 .hardware_unsetup = svm_hardware_unsetup,
Yang, Sheng002c7f72007-07-31 14:23:01 +03001929 .check_processor_compatibility = svm_check_processor_compat,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001930 .hardware_enable = svm_hardware_enable,
1931 .hardware_disable = svm_hardware_disable,
Avi Kivity774ead32007-12-26 13:57:04 +02001932 .cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001933
1934 .vcpu_create = svm_create_vcpu,
1935 .vcpu_free = svm_free_vcpu,
Avi Kivity04d2cc72007-09-10 18:10:54 +03001936 .vcpu_reset = svm_vcpu_reset,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001937
Avi Kivity04d2cc72007-09-10 18:10:54 +03001938 .prepare_guest_switch = svm_prepare_guest_switch,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001939 .vcpu_load = svm_vcpu_load,
1940 .vcpu_put = svm_vcpu_put,
1941
1942 .set_guest_debug = svm_guest_debug,
1943 .get_msr = svm_get_msr,
1944 .set_msr = svm_set_msr,
1945 .get_segment_base = svm_get_segment_base,
1946 .get_segment = svm_get_segment,
1947 .set_segment = svm_set_segment,
Izik Eidus2e4d2652008-03-24 19:38:34 +02001948 .get_cpl = svm_get_cpl,
Rusty Russell1747fb72007-09-06 01:21:32 +10001949 .get_cs_db_l_bits = kvm_get_cs_db_l_bits,
Anthony Liguori25c4c272007-04-27 09:29:21 +03001950 .decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001951 .set_cr0 = svm_set_cr0,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001952 .set_cr3 = svm_set_cr3,
1953 .set_cr4 = svm_set_cr4,
1954 .set_efer = svm_set_efer,
1955 .get_idt = svm_get_idt,
1956 .set_idt = svm_set_idt,
1957 .get_gdt = svm_get_gdt,
1958 .set_gdt = svm_set_gdt,
1959 .get_dr = svm_get_dr,
1960 .set_dr = svm_set_dr,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001961 .get_rflags = svm_get_rflags,
1962 .set_rflags = svm_set_rflags,
1963
Avi Kivity6aa8b732006-12-10 02:21:36 -08001964 .tlb_flush = svm_flush_tlb,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001965
Avi Kivity6aa8b732006-12-10 02:21:36 -08001966 .run = svm_vcpu_run,
Avi Kivity04d2cc72007-09-10 18:10:54 +03001967 .handle_exit = handle_exit,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001968 .skip_emulated_instruction = skip_emulated_instruction,
Ingo Molnar102d8322007-02-19 14:37:47 +02001969 .patch_hypercall = svm_patch_hypercall,
Eddie Dong2a8067f2007-08-06 16:29:07 +03001970 .get_irq = svm_get_irq,
1971 .set_irq = svm_set_irq,
Avi Kivity298101d2007-11-25 13:41:11 +02001972 .queue_exception = svm_queue_exception,
1973 .exception_injected = svm_exception_injected,
Avi Kivity04d2cc72007-09-10 18:10:54 +03001974 .inject_pending_irq = svm_intr_assist,
1975 .inject_pending_vectors = do_interrupt_requests,
Izik Eiduscbc94022007-10-25 00:29:55 +02001976
1977 .set_tss_addr = svm_set_tss_addr,
Sheng Yang67253af2008-04-25 10:20:22 +08001978 .get_tdp_level = get_npt_level,
Sheng Yang64d4d522008-10-09 16:01:57 +08001979 .get_mt_mask_shift = svm_get_mt_mask_shift,
Avi Kivity6aa8b732006-12-10 02:21:36 -08001980};
1981
1982static int __init svm_init(void)
1983{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08001984 return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
Rusty Russellc16f8622007-07-30 21:12:19 +10001985 THIS_MODULE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001986}
1987
1988static void __exit svm_exit(void)
1989{
Zhang Xiantaocb498ea2007-11-14 20:39:31 +08001990 kvm_exit();
Avi Kivity6aa8b732006-12-10 02:21:36 -08001991}
1992
1993module_init(svm_init)
1994module_exit(svm_exit)