/*
 * Kernel-based Virtual Machine driver for Linux
 *
 * AMD SVM support
 *
 * Copyright (C) 2006 Qumranet, Inc.
 * Copyright 2010 Red Hat, Inc. and/or its affiliates.
 *
 * Authors:
 *   Yaniv Kamay  <yaniv@qumranet.com>
 *   Avi Kivity   <avi@qumranet.com>
 *
 * This work is licensed under the terms of the GNU GPL, version 2.  See
 * the COPYING file in the top-level directory.
 *
 */

#define pr_fmt(fmt) "SVM: " fmt

#include <linux/kvm_host.h>

#include "irq.h"
#include "mmu.h"
#include "kvm_cache_regs.h"
#include "x86.h"
#include "cpuid.h"
#include "pmu.h"

#include <linux/module.h>
#include <linux/mod_devicetable.h>
#include <linux/kernel.h>
#include <linux/vmalloc.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/trace_events.h>
#include <linux/slab.h>
#include <linux/amd-iommu.h>
#include <linux/hashtable.h>
#include <linux/frame.h>
#include <linux/psp-sev.h>
#include <linux/file.h>
#include <linux/pagemap.h>
#include <linux/swap.h>

#include <asm/apic.h>
#include <asm/perf_event.h>
#include <asm/tlbflush.h>
#include <asm/desc.h>
#include <asm/debugreg.h>
#include <asm/kvm_para.h>
#include <asm/irq_remapping.h>
#include <asm/microcode.h>
#include <asm/nospec-branch.h>

#include <asm/virtext.h>
#include "trace.h"

#define __ex(x) __kvm_handle_fault_on_reboot(x)

MODULE_AUTHOR("Qumranet");
MODULE_LICENSE("GPL");

static const struct x86_cpu_id svm_cpu_id[] = {
	X86_FEATURE_MATCH(X86_FEATURE_SVM),
	{}
};
MODULE_DEVICE_TABLE(x86cpu, svm_cpu_id);

#define IOPM_ALLOC_ORDER 2
#define MSRPM_ALLOC_ORDER 1

#define SEG_TYPE_LDT 2
#define SEG_TYPE_BUSY_TSS16 3

#define SVM_FEATURE_NPT            (1 <<  0)
#define SVM_FEATURE_LBRV           (1 <<  1)
#define SVM_FEATURE_SVML           (1 <<  2)
#define SVM_FEATURE_NRIP           (1 <<  3)
#define SVM_FEATURE_TSC_RATE       (1 <<  4)
#define SVM_FEATURE_VMCB_CLEAN     (1 <<  5)
#define SVM_FEATURE_FLUSH_ASID     (1 <<  6)
#define SVM_FEATURE_DECODE_ASSIST  (1 <<  7)
#define SVM_FEATURE_PAUSE_FILTER   (1 << 10)

#define SVM_AVIC_DOORBELL	0xc001011b

#define NESTED_EXIT_HOST	0	/* Exit handled on host level */
#define NESTED_EXIT_DONE	1	/* Exit caused nested vmexit  */
#define NESTED_EXIT_CONTINUE	2	/* Further checks needed      */

#define DEBUGCTL_RESERVED_BITS (~(0x3fULL))

#define TSC_RATIO_RSVD          0xffffff0000000000ULL
#define TSC_RATIO_MIN		0x0000000000000001ULL
#define TSC_RATIO_MAX		0x000000ffffffffffULL

#define AVIC_HPA_MASK	~((0xFFFULL << 52) | 0xFFF)

/*
 * 0xff is broadcast, so the max index allowed for physical APIC ID
 * table is 0xfe.  APIC IDs above 0xff are reserved.
 */
#define AVIC_MAX_PHYSICAL_ID_COUNT	255

#define AVIC_UNACCEL_ACCESS_WRITE_MASK		1
#define AVIC_UNACCEL_ACCESS_OFFSET_MASK		0xFF0
#define AVIC_UNACCEL_ACCESS_VECTOR_MASK		0xFFFFFFFF

/* AVIC GATAG is encoded using VM and VCPU IDs */
#define AVIC_VCPU_ID_BITS		8
#define AVIC_VCPU_ID_MASK		((1 << AVIC_VCPU_ID_BITS) - 1)

#define AVIC_VM_ID_BITS			24
#define AVIC_VM_ID_NR			(1 << AVIC_VM_ID_BITS)
#define AVIC_VM_ID_MASK			((1 << AVIC_VM_ID_BITS) - 1)

#define AVIC_GATAG(x, y)		(((x & AVIC_VM_ID_MASK) << AVIC_VCPU_ID_BITS) | \
						(y & AVIC_VCPU_ID_MASK))
#define AVIC_GATAG_TO_VMID(x)		((x >> AVIC_VCPU_ID_BITS) & AVIC_VM_ID_MASK)
#define AVIC_GATAG_TO_VCPUID(x)		(x & AVIC_VCPU_ID_MASK)
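
/*
 * Resulting tag layout (follows from the macros above): bits [31:8]
 * carry the VM ID and bits [7:0] the VCPU ID, so
 * AVIC_GATAG_TO_VMID(AVIC_GATAG(vm, vcpu)) == vm for any vm < AVIC_VM_ID_NR.
 */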

static bool erratum_383_found __read_mostly;

static const u32 host_save_user_msrs[] = {
#ifdef CONFIG_X86_64
	MSR_STAR, MSR_LSTAR, MSR_CSTAR, MSR_SYSCALL_MASK, MSR_KERNEL_GS_BASE,
	MSR_FS_BASE,
#endif
	MSR_IA32_SYSENTER_CS, MSR_IA32_SYSENTER_ESP, MSR_IA32_SYSENTER_EIP,
	MSR_TSC_AUX,
};

#define NR_HOST_SAVE_USER_MSRS ARRAY_SIZE(host_save_user_msrs)

struct kvm_vcpu;

struct nested_state {
	struct vmcb *hsave;
	u64 hsave_msr;
	u64 vm_cr_msr;
	u64 vmcb;

	/* These are the merged vectors */
	u32 *msrpm;

	/* gpa pointers to the real vectors */
	u64 vmcb_msrpm;
	u64 vmcb_iopm;

	/* A VMEXIT is required but not yet emulated */
	bool exit_required;

	/* cache for intercepts of the guest */
	u32 intercept_cr;
	u32 intercept_dr;
	u32 intercept_exceptions;
	u64 intercept;

	/* Nested Paging related state */
	u64 nested_cr3;
};

#define MSRPM_OFFSETS	16
static u32 msrpm_offsets[MSRPM_OFFSETS] __read_mostly;

/*
 * Set osvw_len to higher value when updated Revision Guides
 * are published and we know what the new status bits are
 */
static uint64_t osvw_len = 4, osvw_status;

struct vcpu_svm {
	struct kvm_vcpu vcpu;
	struct vmcb *vmcb;
	unsigned long vmcb_pa;
	struct svm_cpu_data *svm_data;
	uint64_t asid_generation;
	uint64_t sysenter_esp;
	uint64_t sysenter_eip;
	uint64_t tsc_aux;

	u64 msr_decfg;

	u64 next_rip;

	u64 host_user_msrs[NR_HOST_SAVE_USER_MSRS];
	struct {
		u16 fs;
		u16 gs;
		u16 ldt;
		u64 gs_base;
	} host;

	u64 spec_ctrl;

	u32 *msrpm;

	ulong nmi_iret_rip;

	struct nested_state nested;

	bool nmi_singlestep;
	u64 nmi_singlestep_guest_rflags;

	unsigned int3_injected;
	unsigned long int3_rip;

	/* cached guest cpuid flags for faster access */
	bool nrips_enabled	: 1;

	u32 ldr_reg;
	struct page *avic_backing_page;
	u64 *avic_physical_id_cache;
	bool avic_is_running;

	/*
	 * Per-vcpu list of struct amd_svm_iommu_ir:
	 * This is used mainly to store interrupt remapping information used
	 * when updating vcpu affinity. This avoids the need to scan for the
	 * IRTE and try to match the ga_tag in the IOMMU driver.
	 */
	struct list_head ir_list;
	spinlock_t ir_list_lock;

	/* which host CPU was used for running this vcpu */
	unsigned int last_cpu;
};

/*
 * This is a wrapper of struct amd_iommu_ir_data.
 */
struct amd_svm_iommu_ir {
	struct list_head node;	/* Used by SVM for per-vcpu ir_list */
	void *data;		/* Storing pointer to struct amd_ir_data */
};

#define AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK	(0xFF)
#define AVIC_LOGICAL_ID_ENTRY_VALID_MASK		(1 << 31)

#define AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK	(0xFFULL)
#define AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK	(0xFFFFFFFFFFULL << 12)
#define AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK		(1ULL << 62)
#define AVIC_PHYSICAL_ID_ENTRY_VALID_MASK		(1ULL << 63)

static DEFINE_PER_CPU(u64, current_tsc_ratio);
#define TSC_RATIO_DEFAULT	0x0100000000ULL

#define MSR_INVALID			0xffffffffU

static const struct svm_direct_access_msrs {
	u32 index;	/* Index of the MSR */
	bool always;	/* True if intercept is always on */
} direct_access_msrs[] = {
	{ .index = MSR_STAR,				.always = true  },
	{ .index = MSR_IA32_SYSENTER_CS,		.always = true  },
#ifdef CONFIG_X86_64
	{ .index = MSR_GS_BASE,				.always = true  },
	{ .index = MSR_FS_BASE,				.always = true  },
	{ .index = MSR_KERNEL_GS_BASE,			.always = true  },
	{ .index = MSR_LSTAR,				.always = true  },
	{ .index = MSR_CSTAR,				.always = true  },
	{ .index = MSR_SYSCALL_MASK,			.always = true  },
#endif
	{ .index = MSR_IA32_SPEC_CTRL,			.always = false },
	{ .index = MSR_IA32_PRED_CMD,			.always = false },
	{ .index = MSR_IA32_LASTBRANCHFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTBRANCHTOIP,		.always = false },
	{ .index = MSR_IA32_LASTINTFROMIP,		.always = false },
	{ .index = MSR_IA32_LASTINTTOIP,		.always = false },
	{ .index = MSR_INVALID,				.always = false },
};

/* enable NPT for AMD64 and X86 with PAE */
#if defined(CONFIG_X86_64) || defined(CONFIG_X86_PAE)
static bool npt_enabled = true;
#else
static bool npt_enabled;
#endif

/* allow nested paging (virtualized MMU) for all guests */
static int npt = true;
module_param(npt, int, S_IRUGO);

/* allow nested virtualization in KVM/SVM */
static int nested = true;
module_param(nested, int, S_IRUGO);

/* enable/disable AVIC */
static int avic;
#ifdef CONFIG_X86_LOCAL_APIC
module_param(avic, int, S_IRUGO);
#endif

/* enable/disable Virtual VMLOAD VMSAVE */
static int vls = true;
module_param(vls, int, 0444);

/* enable/disable Virtual GIF */
static int vgif = true;
module_param(vgif, int, 0444);

/* enable/disable SEV support */
static int sev = IS_ENABLED(CONFIG_AMD_MEM_ENCRYPT_ACTIVE_BY_DEFAULT);
module_param(sev, int, 0444);

static u8 rsm_ins_bytes[] = "\x0f\xaa";

static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0);
static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa);
static void svm_complete_interrupts(struct vcpu_svm *svm);

static int nested_svm_exit_handled(struct vcpu_svm *svm);
static int nested_svm_intercept(struct vcpu_svm *svm);
static int nested_svm_vmexit(struct vcpu_svm *svm);
static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
				      bool has_error_code, u32 error_code);

enum {
	VMCB_INTERCEPTS, /* Intercept vectors, TSC offset,
			    pause filter count */
	VMCB_PERM_MAP,   /* IOPM Base and MSRPM Base */
	VMCB_ASID,	 /* ASID */
	VMCB_INTR,	 /* int_ctl, int_vector */
	VMCB_NPT,        /* npt_en, nCR3, gPAT */
	VMCB_CR,	 /* CR0, CR3, CR4, EFER */
	VMCB_DR,         /* DR6, DR7 */
	VMCB_DT,         /* GDT, IDT */
	VMCB_SEG,        /* CS, DS, SS, ES, CPL */
	VMCB_CR2,        /* CR2 only */
	VMCB_LBR,        /* DBGCTL, BR_FROM, BR_TO, LAST_EX_FROM, LAST_EX_TO */
	VMCB_AVIC,       /* AVIC APIC_BAR, AVIC APIC_BACKING_PAGE,
			  * AVIC PHYSICAL_TABLE pointer,
			  * AVIC LOGICAL_TABLE pointer
			  */
	VMCB_DIRTY_MAX,
};

/* TPR and CR2 are always written before VMRUN */
#define VMCB_ALWAYS_DIRTY_MASK	((1U << VMCB_INTR) | (1U << VMCB_CR2))

#define VMCB_AVIC_APIC_BAR_MASK		0xFFFFFFFFFF000ULL

static unsigned int max_sev_asid;
static unsigned int min_sev_asid;
static unsigned long *sev_asid_bitmap;
#define __sme_page_pa(x) __sme_set(page_to_pfn(x) << PAGE_SHIFT)

struct enc_region {
	struct list_head list;
	unsigned long npages;
	struct page **pages;
	unsigned long uaddr;
	unsigned long size;
};

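/*
 * SEV state accessors: svm_sev_enabled() reports whether hardware setup
 * found any SEV ASIDs, while sev_guest() and sev_get_asid() read the
 * per-VM SEV state kept in struct kvm.
 */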
static inline bool svm_sev_enabled(void)
{
	return max_sev_asid;
}

static inline bool sev_guest(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	return sev->active;
}

static inline int sev_get_asid(struct kvm *kvm)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;

	return sev->asid;
}

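/*
 * VMCB clean-bit helpers.  Each bit in vmcb->control.clean corresponds to
 * one VMCB area from the enum above; a set bit tells the CPU that the area
 * is unchanged since the last VMRUN and may be served from its cache.
 * mark_dirty() clears a bit to force a reload of that area.
 */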
static inline void mark_all_dirty(struct vmcb *vmcb)
{
	vmcb->control.clean = 0;
}

static inline void mark_all_clean(struct vmcb *vmcb)
{
	vmcb->control.clean = ((1 << VMCB_DIRTY_MAX) - 1)
			       & ~VMCB_ALWAYS_DIRTY_MASK;
}

static inline void mark_dirty(struct vmcb *vmcb, int bit)
{
	vmcb->control.clean &= ~(1 << bit);
}

static inline struct vcpu_svm *to_svm(struct kvm_vcpu *vcpu)
{
	return container_of(vcpu, struct vcpu_svm, vcpu);
}

static inline void avic_update_vapic_bar(struct vcpu_svm *svm, u64 data)
{
	svm->vmcb->control.avic_vapic_bar = data & VMCB_AVIC_APIC_BAR_MASK;
	mark_dirty(svm->vmcb, VMCB_AVIC);
}

static inline bool avic_vcpu_is_running(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 *entry = svm->avic_physical_id_cache;

	if (!entry)
		return false;

	return (READ_ONCE(*entry) & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
}

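/*
 * Recompute the intercept vectors that hardware actually uses.  Outside
 * guest mode the VMCB values are taken as-is; while a nested guest runs,
 * the effective intercepts are the union of the L1 (hsave) intercepts and
 * the nested-guest intercepts cached in svm->nested.
 */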
static void recalc_intercepts(struct vcpu_svm *svm)
{
	struct vmcb_control_area *c, *h;
	struct nested_state *g;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);

	if (!is_guest_mode(&svm->vcpu))
		return;

	c = &svm->vmcb->control;
	h = &svm->nested.hsave->control;
	g = &svm->nested;

	c->intercept_cr = h->intercept_cr | g->intercept_cr;
	c->intercept_dr = h->intercept_dr | g->intercept_dr;
	c->intercept_exceptions = h->intercept_exceptions | g->intercept_exceptions;
	c->intercept = h->intercept | g->intercept;
}

static inline struct vmcb *get_host_vmcb(struct vcpu_svm *svm)
{
	if (is_guest_mode(&svm->vcpu))
		return svm->nested.hsave;
	else
		return svm->vmcb;
}

static inline void set_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_cr &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline bool is_cr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	return vmcb->control.intercept_cr & (1U << bit);
}

static inline void set_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = (1 << INTERCEPT_DR0_READ)
		| (1 << INTERCEPT_DR1_READ)
		| (1 << INTERCEPT_DR2_READ)
		| (1 << INTERCEPT_DR3_READ)
		| (1 << INTERCEPT_DR4_READ)
		| (1 << INTERCEPT_DR5_READ)
		| (1 << INTERCEPT_DR6_READ)
		| (1 << INTERCEPT_DR7_READ)
		| (1 << INTERCEPT_DR0_WRITE)
		| (1 << INTERCEPT_DR1_WRITE)
		| (1 << INTERCEPT_DR2_WRITE)
		| (1 << INTERCEPT_DR3_WRITE)
		| (1 << INTERCEPT_DR4_WRITE)
		| (1 << INTERCEPT_DR5_WRITE)
		| (1 << INTERCEPT_DR6_WRITE)
		| (1 << INTERCEPT_DR7_WRITE);

	recalc_intercepts(svm);
}

static inline void clr_dr_intercepts(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_dr = 0;

	recalc_intercepts(svm);
}

static inline void set_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions |= (1U << bit);

	recalc_intercepts(svm);
}

static inline void clr_exception_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept_exceptions &= ~(1U << bit);

	recalc_intercepts(svm);
}

static inline void set_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept |= (1ULL << bit);

	recalc_intercepts(svm);
}

static inline void clr_intercept(struct vcpu_svm *svm, int bit)
{
	struct vmcb *vmcb = get_host_vmcb(svm);

	vmcb->control.intercept &= ~(1ULL << bit);

	recalc_intercepts(svm);
}

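/*
 * GIF (Global Interrupt Flag) helpers.  When the Virtual GIF feature is
 * active the flag lives in VMCB int_ctl (V_GIF_MASK); otherwise KVM
 * tracks it in software via HF_GIF_MASK in vcpu->arch.hflags.
 */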
static inline bool vgif_enabled(struct vcpu_svm *svm)
{
	return !!(svm->vmcb->control.int_ctl & V_GIF_ENABLE_MASK);
}

static inline void enable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl |= V_GIF_MASK;
	else
		svm->vcpu.arch.hflags |= HF_GIF_MASK;
}

static inline void disable_gif(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		svm->vmcb->control.int_ctl &= ~V_GIF_MASK;
	else
		svm->vcpu.arch.hflags &= ~HF_GIF_MASK;
}

static inline bool gif_set(struct vcpu_svm *svm)
{
	if (vgif_enabled(svm))
		return !!(svm->vmcb->control.int_ctl & V_GIF_MASK);
	else
		return !!(svm->vcpu.arch.hflags & HF_GIF_MASK);
}

static unsigned long iopm_base;

struct kvm_ldttss_desc {
	u16 limit0;
	u16 base0;
	unsigned base1:8, type:5, dpl:2, p:1;
	unsigned limit1:4, zero0:3, g:1, base2:8;
	u32 base3;
	u32 zero1;
} __attribute__((packed));

struct svm_cpu_data {
	int cpu;

	u64 asid_generation;
	u32 max_asid;
	u32 next_asid;
	u32 min_asid;
	struct kvm_ldttss_desc *tss_desc;

	struct page *save_area;
	struct vmcb *current_vmcb;

	/* index = sev_asid, value = vmcb pointer */
	struct vmcb **sev_vmcbs;
};

static DEFINE_PER_CPU(struct svm_cpu_data *, svm_data);

struct svm_init_data {
	int cpu;
	int r;
};

static const u32 msrpm_ranges[] = {0, 0xc0000000, 0xc0010000};

#define NUM_MSR_MAPS ARRAY_SIZE(msrpm_ranges)
#define MSRS_RANGE_SIZE 2048
#define MSRS_IN_RANGE (MSRS_RANGE_SIZE * 8 / 2)

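/*
 * The MSR permission bitmap covers the three MSR ranges listed in
 * msrpm_ranges above.  Each MSR uses two consecutive bits (read
 * intercept, then write intercept), so one byte describes four MSRs
 * and each 2048-byte range covers MSRS_IN_RANGE = 8192 MSRs.
 */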
static u32 svm_msrpm_offset(u32 msr)
{
	u32 offset;
	int i;

	for (i = 0; i < NUM_MSR_MAPS; i++) {
		if (msr < msrpm_ranges[i] ||
		    msr >= msrpm_ranges[i] + MSRS_IN_RANGE)
			continue;

		offset  = (msr - msrpm_ranges[i]) / 4; /* 4 msrs per u8   */
		offset += (i * MSRS_RANGE_SIZE);       /* add range offset */

		/* Now we have the u8 offset - but need the u32 offset */
		return offset / 4;
	}

	/* MSR not in any range */
	return MSR_INVALID;
}

#define MAX_INST_SIZE 15

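/*
 * Wrappers emitting the raw CLGI, STGI and INVLPGA opcodes.  Going
 * through __ex() attaches the fault-on-reboot fixup, so a fault taken
 * while SVM is being shut down is handled instead of crashing the host.
 */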
static inline void clgi(void)
{
	asm volatile (__ex(SVM_CLGI));
}

static inline void stgi(void)
{
	asm volatile (__ex(SVM_STGI));
}

static inline void invlpga(unsigned long addr, u32 asid)
{
	asm volatile (__ex(SVM_INVLPGA) : : "a"(addr), "c"(asid));
}

static int get_npt_level(struct kvm_vcpu *vcpu)
{
#ifdef CONFIG_X86_64
	return PT64_ROOT_4LEVEL;
#else
	return PT32E_ROOT_LEVEL;
#endif
}

static void svm_set_efer(struct kvm_vcpu *vcpu, u64 efer)
{
	vcpu->arch.efer = efer;
	if (!npt_enabled && !(efer & EFER_LMA))
		efer &= ~EFER_LME;

	to_svm(vcpu)->vmcb->save.efer = efer | EFER_SVME;
	mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
}

static int is_external_interrupt(u32 info)
{
	info &= SVM_EVTINJ_TYPE_MASK | SVM_EVTINJ_VALID;
	return info == (SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR);
}

static u32 svm_get_interrupt_shadow(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u32 ret = 0;

	if (svm->vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK)
		ret = KVM_X86_SHADOW_INT_STI | KVM_X86_SHADOW_INT_MOV_SS;
	return ret;
}

static void svm_set_interrupt_shadow(struct kvm_vcpu *vcpu, int mask)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (mask == 0)
		svm->vmcb->control.int_state &= ~SVM_INTERRUPT_SHADOW_MASK;
	else
		svm->vmcb->control.int_state |= SVM_INTERRUPT_SHADOW_MASK;

}

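/*
 * Advance the guest RIP over the just-intercepted instruction.  CPUs
 * with NRIPS provide the next RIP in the VMCB; without it the x86
 * instruction emulator is used to skip the instruction.
 */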
static void skip_emulated_instruction(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);

	if (svm->vmcb->control.next_rip != 0) {
		WARN_ON_ONCE(!static_cpu_has(X86_FEATURE_NRIPS));
		svm->next_rip = svm->vmcb->control.next_rip;
	}

	if (!svm->next_rip) {
		if (emulate_instruction(vcpu, EMULTYPE_SKIP) !=
				EMULATE_DONE)
			printk(KERN_DEBUG "%s: NOP\n", __func__);
		return;
	}
	if (svm->next_rip - kvm_rip_read(vcpu) > MAX_INST_SIZE)
		printk(KERN_ERR "%s: ip 0x%lx next 0x%llx\n",
		       __func__, kvm_rip_read(vcpu), svm->next_rip);

	kvm_rip_write(vcpu, svm->next_rip);
	svm_set_interrupt_shadow(vcpu, 0);
}

static void svm_queue_exception(struct kvm_vcpu *vcpu)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	unsigned nr = vcpu->arch.exception.nr;
	bool has_error_code = vcpu->arch.exception.has_error_code;
	bool reinject = vcpu->arch.exception.injected;
	u32 error_code = vcpu->arch.exception.error_code;

	/*
	 * If we are within a nested VM we'd better #VMEXIT and let the guest
	 * handle the exception
	 */
	if (!reinject &&
	    nested_svm_check_exception(svm, nr, has_error_code, error_code))
		return;

	if (nr == BP_VECTOR && !static_cpu_has(X86_FEATURE_NRIPS)) {
		unsigned long rip, old_rip = kvm_rip_read(&svm->vcpu);

		/*
		 * For guest debugging where we have to reinject #BP if some
		 * INT3 is guest-owned:
		 * Emulate nRIP by moving RIP forward. Will fail if injection
		 * raises a fault that is not intercepted. Still better than
		 * failing in all cases.
		 */
		skip_emulated_instruction(&svm->vcpu);
		rip = kvm_rip_read(&svm->vcpu);
		svm->int3_rip = rip + svm->vmcb->save.cs.base;
		svm->int3_injected = rip - old_rip;
	}

	svm->vmcb->control.event_inj = nr
		| SVM_EVTINJ_VALID
		| (has_error_code ? SVM_EVTINJ_VALID_ERR : 0)
		| SVM_EVTINJ_TYPE_EXEPT;
	svm->vmcb->control.event_inj_err = error_code;
}

static void svm_init_erratum_383(void)
{
	u32 low, high;
	int err;
	u64 val;

	if (!static_cpu_has_bug(X86_BUG_AMD_TLB_MMATCH))
		return;

	/* Use _safe variants to not break nested virtualization */
	val = native_read_msr_safe(MSR_AMD64_DC_CFG, &err);
	if (err)
		return;

	val |= (1ULL << 47);

	low  = lower_32_bits(val);
	high = upper_32_bits(val);

	native_write_msr_safe(MSR_AMD64_DC_CFG, low, high);

	erratum_383_found = true;
}

static void svm_init_osvw(struct kvm_vcpu *vcpu)
{
	/*
	 * Guests should see errata 400 and 415 as fixed (assuming that
	 * HLT and IO instructions are intercepted).
	 */
	vcpu->arch.osvw.length = (osvw_len >= 3) ? (osvw_len) : 3;
	vcpu->arch.osvw.status = osvw_status & ~(6ULL);

	/*
	 * By increasing VCPU's osvw.length to 3 we are telling the guest that
	 * all osvw.status bits inside that length, including bit 0 (which is
	 * reserved for erratum 298), are valid. However, if host processor's
	 * osvw_len is 0 then osvw_status[0] carries no information. We need to
	 * be conservative here and therefore we tell the guest that erratum 298
	 * is present (because we really don't know).
	 */
	if (osvw_len == 0 && boot_cpu_data.x86 == 0x10)
		vcpu->arch.osvw.status |= 1;
}

static int has_svm(void)
{
	const char *msg;

	if (!cpu_has_svm(&msg)) {
		printk(KERN_INFO "has_svm: %s\n", msg);
		return 0;
	}

	return 1;
}

static void svm_hardware_disable(void)
{
	/* Make sure we clean up behind us */
	if (static_cpu_has(X86_FEATURE_TSCRATEMSR))
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);

	cpu_svm_disable();

	amd_pmu_disable_virt();
}

static int svm_hardware_enable(void)
{

	struct svm_cpu_data *sd;
	uint64_t efer;
	struct desc_struct *gdt;
	int me = raw_smp_processor_id();

	rdmsrl(MSR_EFER, efer);
	if (efer & EFER_SVME)
		return -EBUSY;

	if (!has_svm()) {
		pr_err("%s: err EOPNOTSUPP on %d\n", __func__, me);
		return -EINVAL;
	}
	sd = per_cpu(svm_data, me);
	if (!sd) {
		pr_err("%s: svm_data is NULL on %d\n", __func__, me);
		return -EINVAL;
	}

	sd->asid_generation = 1;
	sd->max_asid = cpuid_ebx(SVM_CPUID_FUNC) - 1;
	sd->next_asid = sd->max_asid + 1;
	sd->min_asid = max_sev_asid + 1;

	gdt = get_current_gdt_rw();
	sd->tss_desc = (struct kvm_ldttss_desc *)(gdt + GDT_ENTRY_TSS);

	wrmsrl(MSR_EFER, efer | EFER_SVME);

	wrmsrl(MSR_VM_HSAVE_PA, page_to_pfn(sd->save_area) << PAGE_SHIFT);

	if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		wrmsrl(MSR_AMD64_TSC_RATIO, TSC_RATIO_DEFAULT);
		__this_cpu_write(current_tsc_ratio, TSC_RATIO_DEFAULT);
	}


	/*
	 * Get OSVW bits.
	 *
	 * Note that it is possible to have a system with mixed processor
	 * revisions and therefore different OSVW bits. If bits are not the same
	 * on different processors then choose the worst case (i.e. if erratum
	 * is present on one processor and not on another then assume that the
	 * erratum is present everywhere).
	 */
	if (cpu_has(&boot_cpu_data, X86_FEATURE_OSVW)) {
		uint64_t len, status = 0;
		int err;

		len = native_read_msr_safe(MSR_AMD64_OSVW_ID_LENGTH, &err);
		if (!err)
			status = native_read_msr_safe(MSR_AMD64_OSVW_STATUS,
						      &err);

		if (err)
			osvw_status = osvw_len = 0;
		else {
			if (len < osvw_len)
				osvw_len = len;
			osvw_status |= status;
			osvw_status &= (1ULL << osvw_len) - 1;
		}
	} else
		osvw_status = osvw_len = 0;

	svm_init_erratum_383();

	amd_pmu_enable_virt();

	return 0;
}

static void svm_cpu_uninit(int cpu)
{
	struct svm_cpu_data *sd = per_cpu(svm_data, raw_smp_processor_id());

	if (!sd)
		return;

	per_cpu(svm_data, raw_smp_processor_id()) = NULL;
	kfree(sd->sev_vmcbs);
	__free_page(sd->save_area);
	kfree(sd);
}

static int svm_cpu_init(int cpu)
{
	struct svm_cpu_data *sd;
	int r;

	sd = kzalloc(sizeof(struct svm_cpu_data), GFP_KERNEL);
	if (!sd)
		return -ENOMEM;
	sd->cpu = cpu;
	r = -ENOMEM;
	sd->save_area = alloc_page(GFP_KERNEL);
	if (!sd->save_area)
		goto err_1;

	if (svm_sev_enabled()) {
		r = -ENOMEM;
		sd->sev_vmcbs = kmalloc((max_sev_asid + 1) * sizeof(void *), GFP_KERNEL);
		if (!sd->sev_vmcbs)
			goto err_1;
	}

	per_cpu(svm_data, cpu) = sd;

	return 0;

err_1:
	kfree(sd);
	return r;

}

static bool valid_msr_intercept(u32 index)
{
	int i;

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++)
		if (direct_access_msrs[i].index == index)
			return true;

	return false;
}

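/*
 * Return true if writes to @msr are currently intercepted.  The check is
 * done against the MSR permission map that is active for the vcpu: the
 * nested map while in guest mode, the vcpu's own map otherwise.
 */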
static bool msr_write_intercepted(struct kvm_vcpu *vcpu, unsigned msr)
{
	u8 bit_write;
	unsigned long tmp;
	u32 offset;
	u32 *msrpm;

	msrpm = is_guest_mode(vcpu) ? to_svm(vcpu)->nested.msrpm:
				      to_svm(vcpu)->msrpm;

	offset    = svm_msrpm_offset(msr);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	return !!test_bit(bit_write,  &tmp);
}

static void set_msr_interception(u32 *msrpm, unsigned msr,
				 int read, int write)
{
	u8 bit_read, bit_write;
	unsigned long tmp;
	u32 offset;

	/*
	 * If this warning triggers extend the direct_access_msrs list at the
	 * beginning of the file
	 */
	WARN_ON(!valid_msr_intercept(msr));

	offset    = svm_msrpm_offset(msr);
	bit_read  = 2 * (msr & 0x0f);
	bit_write = 2 * (msr & 0x0f) + 1;
	tmp       = msrpm[offset];

	BUG_ON(offset == MSR_INVALID);

	read  ? clear_bit(bit_read,  &tmp) : set_bit(bit_read,  &tmp);
	write ? clear_bit(bit_write, &tmp) : set_bit(bit_write, &tmp);

	msrpm[offset] = tmp;
}

static void svm_vcpu_init_msrpm(u32 *msrpm)
{
	int i;

	memset(msrpm, 0xff, PAGE_SIZE * (1 << MSRPM_ALLOC_ORDER));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		if (!direct_access_msrs[i].always)
			continue;

		set_msr_interception(msrpm, direct_access_msrs[i].index, 1, 1);
	}
}

static void add_msr_offset(u32 offset)
{
	int i;

	for (i = 0; i < MSRPM_OFFSETS; ++i) {

		/* Offset already in list? */
		if (msrpm_offsets[i] == offset)
			return;

		/* Slot used by another offset? */
		if (msrpm_offsets[i] != MSR_INVALID)
			continue;

		/* Add offset to list */
		msrpm_offsets[i] = offset;

		return;
	}

	/*
	 * If this BUG triggers the msrpm_offsets table has an overflow. Just
	 * increase MSRPM_OFFSETS in this case.
	 */
	BUG();
}

static void init_msrpm_offsets(void)
{
	int i;

	memset(msrpm_offsets, 0xff, sizeof(msrpm_offsets));

	for (i = 0; direct_access_msrs[i].index != MSR_INVALID; i++) {
		u32 offset;

		offset = svm_msrpm_offset(direct_access_msrs[i].index);
		BUG_ON(offset == MSR_INVALID);

		add_msr_offset(offset);
	}
}

static void svm_enable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext |= LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 1, 1);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 1, 1);
}

static void svm_disable_lbrv(struct vcpu_svm *svm)
{
	u32 *msrpm = svm->msrpm;

	svm->vmcb->control.virt_ext &= ~LBR_CTL_ENABLE_MASK;
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTBRANCHTOIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTFROMIP, 0, 0);
	set_msr_interception(msrpm, MSR_IA32_LASTINTTOIP, 0, 0);
}

static void disable_nmi_singlestep(struct vcpu_svm *svm)
{
	svm->nmi_singlestep = false;

	if (!(svm->vcpu.guest_debug & KVM_GUESTDBG_SINGLESTEP)) {
		/* Clear our flags if they were not set by the guest */
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_TF;
		if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
			svm->vmcb->save.rflags &= ~X86_EFLAGS_RF;
	}
}

/* Note:
 * This hash table is used to map a VM_ID to a struct kvm_arch when
 * handling an AMD IOMMU GALOG notification, in order to schedule in
 * a particular vCPU.
 */
#define SVM_VM_DATA_HASH_BITS	8
static DEFINE_HASHTABLE(svm_vm_data_hash, SVM_VM_DATA_HASH_BITS);
static u32 next_vm_id = 0;
static bool next_vm_id_wrapped = 0;
static DEFINE_SPINLOCK(svm_vm_data_hash_lock);

/* Note:
 * This function is called from the IOMMU driver to notify SVM that it
 * should schedule in a particular vCPU of a particular VM.
 */
static int avic_ga_log_notifier(u32 ga_tag)
{
	unsigned long flags;
	struct kvm_arch *ka = NULL;
	struct kvm_vcpu *vcpu = NULL;
	u32 vm_id = AVIC_GATAG_TO_VMID(ga_tag);
	u32 vcpu_id = AVIC_GATAG_TO_VCPUID(ga_tag);

	pr_debug("SVM: %s: vm_id=%#x, vcpu_id=%#x\n", __func__, vm_id, vcpu_id);

	spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
	hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
		struct kvm *kvm = container_of(ka, struct kvm, arch);
		struct kvm_arch *vm_data = &kvm->arch;

		if (vm_data->avic_vm_id != vm_id)
			continue;
		vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
		break;
	}
	spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);

	/* Note:
	 * At this point, the IOMMU should have already set the pending
	 * bit in the vAPIC backing page. So, we just need to schedule
	 * in the vcpu.
	 */
	if (vcpu)
		kvm_vcpu_wake_up(vcpu);

	return 0;
}

static __init int sev_hardware_setup(void)
{
	struct sev_user_data_status *status;
	int rc;

	/* Maximum number of encrypted guests supported simultaneously */
	max_sev_asid = cpuid_ecx(0x8000001F);

	if (!max_sev_asid)
		return 1;

	/* Minimum ASID value that should be used for SEV guests */
	min_sev_asid = cpuid_edx(0x8000001F);

	/* Initialize SEV ASID bitmap */
	sev_asid_bitmap = kcalloc(BITS_TO_LONGS(max_sev_asid),
				  sizeof(unsigned long), GFP_KERNEL);
	if (!sev_asid_bitmap)
		return 1;

	status = kmalloc(sizeof(*status), GFP_KERNEL);
	if (!status)
		return 1;

	/*
	 * Check SEV platform status.
	 *
	 * PLATFORM_STATUS can be called in any state; if we fail to query
	 * the platform status, then either the PSP firmware does not
	 * support the SEV feature or the SEV firmware is dead.
	 */
	rc = sev_platform_status(status, NULL);
	if (rc)
		goto err;

	pr_info("SEV supported\n");

err:
	kfree(status);
	return rc;
}

static __init int svm_hardware_setup(void)
{
	int cpu;
	struct page *iopm_pages;
	void *iopm_va;
	int r;

	iopm_pages = alloc_pages(GFP_KERNEL, IOPM_ALLOC_ORDER);

	if (!iopm_pages)
		return -ENOMEM;

	iopm_va = page_address(iopm_pages);
	memset(iopm_va, 0xff, PAGE_SIZE * (1 << IOPM_ALLOC_ORDER));
	iopm_base = page_to_pfn(iopm_pages) << PAGE_SHIFT;

	init_msrpm_offsets();

	if (boot_cpu_has(X86_FEATURE_NX))
		kvm_enable_efer_bits(EFER_NX);

	if (boot_cpu_has(X86_FEATURE_FXSR_OPT))
		kvm_enable_efer_bits(EFER_FFXSR);

	if (boot_cpu_has(X86_FEATURE_TSCRATEMSR)) {
		kvm_has_tsc_control = true;
		kvm_max_tsc_scaling_ratio = TSC_RATIO_MAX;
		kvm_tsc_scaling_ratio_frac_bits = 32;
	}

	if (nested) {
		printk(KERN_INFO "kvm: Nested Virtualization enabled\n");
		kvm_enable_efer_bits(EFER_SVME | EFER_LMSLE);
	}

	if (sev) {
		if (boot_cpu_has(X86_FEATURE_SEV) &&
		    IS_ENABLED(CONFIG_KVM_AMD_SEV)) {
			r = sev_hardware_setup();
			if (r)
				sev = false;
		} else {
			sev = false;
		}
	}

	for_each_possible_cpu(cpu) {
		r = svm_cpu_init(cpu);
		if (r)
			goto err;
	}

	if (!boot_cpu_has(X86_FEATURE_NPT))
		npt_enabled = false;

	if (npt_enabled && !npt) {
		printk(KERN_INFO "kvm: Nested Paging disabled\n");
		npt_enabled = false;
	}

	if (npt_enabled) {
		printk(KERN_INFO "kvm: Nested Paging enabled\n");
		kvm_enable_tdp();
	} else
		kvm_disable_tdp();

	if (avic) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_AVIC) ||
		    !IS_ENABLED(CONFIG_X86_LOCAL_APIC)) {
			avic = false;
		} else {
			pr_info("AVIC enabled\n");

			amd_iommu_register_ga_log_notifier(&avic_ga_log_notifier);
		}
	}

	if (vls) {
		if (!npt_enabled ||
		    !boot_cpu_has(X86_FEATURE_V_VMSAVE_VMLOAD) ||
		    !IS_ENABLED(CONFIG_X86_64)) {
			vls = false;
		} else {
			pr_info("Virtual VMLOAD VMSAVE supported\n");
		}
	}

	if (vgif) {
		if (!boot_cpu_has(X86_FEATURE_VGIF))
			vgif = false;
		else
			pr_info("Virtual GIF supported\n");
	}

	return 0;

err:
	__free_pages(iopm_pages, IOPM_ALLOC_ORDER);
	iopm_base = 0;
	return r;
}

static __exit void svm_hardware_unsetup(void)
{
	int cpu;

	if (svm_sev_enabled())
		kfree(sev_asid_bitmap);

	for_each_possible_cpu(cpu)
		svm_cpu_uninit(cpu);

	__free_pages(pfn_to_page(iopm_base >> PAGE_SHIFT), IOPM_ALLOC_ORDER);
	iopm_base = 0;
}

static void init_seg(struct vmcb_seg *seg)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | SVM_SELECTOR_S_MASK |
		      SVM_SELECTOR_WRITE_MASK; /* Read/Write Data Segment */
	seg->limit = 0xffff;
	seg->base = 0;
}

static void init_sys_seg(struct vmcb_seg *seg, uint32_t type)
{
	seg->selector = 0;
	seg->attrib = SVM_SELECTOR_P_MASK | type;
	seg->limit = 0xffff;
	seg->base = 0;
}

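/*
 * Update the guest-visible TSC offset.  While a nested guest is running
 * the VMCB holds the combined offset, so the nested (L2) contribution is
 * preserved and the new L1 offset is stashed in the host save area.
 */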
static void svm_write_tsc_offset(struct kvm_vcpu *vcpu, u64 offset)
{
	struct vcpu_svm *svm = to_svm(vcpu);
	u64 g_tsc_offset = 0;

	if (is_guest_mode(vcpu)) {
		g_tsc_offset = svm->vmcb->control.tsc_offset -
			       svm->nested.hsave->control.tsc_offset;
		svm->nested.hsave->control.tsc_offset = offset;
	} else
		trace_kvm_write_tsc_offset(vcpu->vcpu_id,
					   svm->vmcb->control.tsc_offset,
					   offset);

	svm->vmcb->control.tsc_offset = offset + g_tsc_offset;

	mark_dirty(svm->vmcb, VMCB_INTERCEPTS);
}

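/*
 * Point the VMCB at the vAPIC backing page and at the per-VM logical and
 * physical APIC ID tables, then switch AVIC on for this vcpu.
 */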
static void avic_init_vmcb(struct vcpu_svm *svm)
{
	struct vmcb *vmcb = svm->vmcb;
	struct kvm_arch *vm_data = &svm->vcpu.kvm->arch;
	phys_addr_t bpa = __sme_set(page_to_phys(svm->avic_backing_page));
	phys_addr_t lpa = __sme_set(page_to_phys(vm_data->avic_logical_id_table_page));
	phys_addr_t ppa = __sme_set(page_to_phys(vm_data->avic_physical_id_table_page));

	vmcb->control.avic_backing_page = bpa & AVIC_HPA_MASK;
	vmcb->control.avic_logical_id = lpa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id = ppa & AVIC_HPA_MASK;
	vmcb->control.avic_physical_id |= AVIC_MAX_PHYSICAL_ID_COUNT;
	vmcb->control.int_ctl |= AVIC_ENABLE_MASK;
}

Paolo Bonzini56908912015-10-19 11:30:19 +02001344static void init_vmcb(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001345{
Joerg Roedele6101a92008-02-13 18:58:45 +01001346 struct vmcb_control_area *control = &svm->vmcb->control;
1347 struct vmcb_save_area *save = &svm->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001348
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001349 svm->vcpu.arch.hflags = 0;
Avi Kivitybff78272010-01-07 13:16:08 +02001350
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001351 set_cr_intercept(svm, INTERCEPT_CR0_READ);
1352 set_cr_intercept(svm, INTERCEPT_CR3_READ);
1353 set_cr_intercept(svm, INTERCEPT_CR4_READ);
1354 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
1355 set_cr_intercept(svm, INTERCEPT_CR3_WRITE);
1356 set_cr_intercept(svm, INTERCEPT_CR4_WRITE);
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05001357 if (!kvm_vcpu_apicv_active(&svm->vcpu))
1358 set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001359
Paolo Bonzini5315c712014-03-03 13:08:29 +01001360 set_dr_intercepts(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001361
Joerg Roedel18c918c2010-11-30 18:03:59 +01001362 set_exception_intercept(svm, PF_VECTOR);
1363 set_exception_intercept(svm, UD_VECTOR);
1364 set_exception_intercept(svm, MC_VECTOR);
Eric Northup54a20552015-11-03 18:03:53 +01001365 set_exception_intercept(svm, AC_VECTOR);
Paolo Bonzinicbdb9672015-11-10 09:14:39 +01001366 set_exception_intercept(svm, DB_VECTOR);
Liran Alon97184202018-03-12 13:12:52 +02001367 /*
1368 * Guest access to VMware backdoor ports could legitimately
1369	 * trigger #GP because of the TSS I/O permission bitmap.
1370	 * We intercept those #GPs and allow the access anyway,
1371	 * as VMware does.
1372 */
1373 if (enable_vmware_backdoor)
1374 set_exception_intercept(svm, GP_VECTOR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001375
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001376 set_intercept(svm, INTERCEPT_INTR);
1377 set_intercept(svm, INTERCEPT_NMI);
1378 set_intercept(svm, INTERCEPT_SMI);
1379 set_intercept(svm, INTERCEPT_SELECTIVE_CR0);
Avi Kivity332b56e2011-11-10 14:57:24 +02001380 set_intercept(svm, INTERCEPT_RDPMC);
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001381 set_intercept(svm, INTERCEPT_CPUID);
1382 set_intercept(svm, INTERCEPT_INVD);
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001383 set_intercept(svm, INTERCEPT_INVLPG);
1384 set_intercept(svm, INTERCEPT_INVLPGA);
1385 set_intercept(svm, INTERCEPT_IOIO_PROT);
1386 set_intercept(svm, INTERCEPT_MSR_PROT);
1387 set_intercept(svm, INTERCEPT_TASK_SWITCH);
1388 set_intercept(svm, INTERCEPT_SHUTDOWN);
1389 set_intercept(svm, INTERCEPT_VMRUN);
1390 set_intercept(svm, INTERCEPT_VMMCALL);
1391 set_intercept(svm, INTERCEPT_VMLOAD);
1392 set_intercept(svm, INTERCEPT_VMSAVE);
1393 set_intercept(svm, INTERCEPT_STGI);
1394 set_intercept(svm, INTERCEPT_CLGI);
1395 set_intercept(svm, INTERCEPT_SKINIT);
1396 set_intercept(svm, INTERCEPT_WBINVD);
Joerg Roedel81dd35d2010-12-07 17:15:06 +01001397 set_intercept(svm, INTERCEPT_XSETBV);
Brijesh Singh7607b712018-02-19 10:14:44 -06001398 set_intercept(svm, INTERCEPT_RSM);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001399
Wanpeng Li4d5422c2018-03-12 04:53:02 -07001400 if (!kvm_mwait_in_guest(svm->vcpu.kvm)) {
Michael S. Tsirkin668fffa32017-04-21 12:27:17 +02001401 set_intercept(svm, INTERCEPT_MONITOR);
1402 set_intercept(svm, INTERCEPT_MWAIT);
1403 }
1404
Wanpeng Licaa057a2018-03-12 04:53:03 -07001405 if (!kvm_hlt_in_guest(svm->vcpu.kvm))
1406 set_intercept(svm, INTERCEPT_HLT);
1407
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001408 control->iopm_base_pa = __sme_set(iopm_base);
1409 control->msrpm_base_pa = __sme_set(__pa(svm->msrpm));
Avi Kivity6aa8b732006-12-10 02:21:36 -08001410 control->int_ctl = V_INTR_MASKING_MASK;
1411
1412 init_seg(&save->es);
1413 init_seg(&save->ss);
1414 init_seg(&save->ds);
1415 init_seg(&save->fs);
1416 init_seg(&save->gs);
1417
1418 save->cs.selector = 0xf000;
Paolo Bonzini04b66832013-03-19 16:30:26 +01001419 save->cs.base = 0xffff0000;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001420 /* Executable/Readable Code Segment */
1421 save->cs.attrib = SVM_SELECTOR_READ_MASK | SVM_SELECTOR_P_MASK |
1422 SVM_SELECTOR_S_MASK | SVM_SELECTOR_CODE_MASK;
1423 save->cs.limit = 0xffff;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001424
1425 save->gdtr.limit = 0xffff;
1426 save->idtr.limit = 0xffff;
1427
1428 init_sys_seg(&save->ldtr, SEG_TYPE_LDT);
1429 init_sys_seg(&save->tr, SEG_TYPE_BUSY_TSS16);
1430
Paolo Bonzini56908912015-10-19 11:30:19 +02001431 svm_set_efer(&svm->vcpu, 0);
Mike Dayd77c26f2007-10-08 09:02:08 -04001432 save->dr6 = 0xffff0ff0;
Avi Kivityf6e78472010-08-02 15:30:20 +03001433 kvm_set_rflags(&svm->vcpu, 2);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001434 save->rip = 0x0000fff0;
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03001435 svm->vcpu.arch.regs[VCPU_REGS_RIP] = save->rip;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001436
Joerg Roedele0231712010-02-24 18:59:10 +01001437 /*
Eduardo Habkost18fa0002009-10-24 02:49:59 -02001438 * svm_set_cr0() sets PG and WP and clears NW and CD on save->cr0.
Nadav Amitd28bc9d2015-04-13 14:34:08 +03001439 * It also updates the guest-visible cr0 value.
Avi Kivity6aa8b732006-12-10 02:21:36 -08001440 */
Paolo Bonzini79a80592015-09-21 07:46:55 +02001441 svm_set_cr0(&svm->vcpu, X86_CR0_NW | X86_CR0_CD | X86_CR0_ET);
Igor Mammedovebae8712015-09-18 15:39:05 +02001442 kvm_mmu_reset_context(&svm->vcpu);
Eduardo Habkost18fa0002009-10-24 02:49:59 -02001443
Rusty Russell66aee912007-07-17 23:34:16 +10001444 save->cr4 = X86_CR4_PAE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001445 /* rdx = ?? */
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001446
1447 if (npt_enabled) {
1448	 /* Set up the VMCB for Nested Paging */
Tom Lendackycea3a192017-12-04 10:57:24 -06001449 control->nested_ctl |= SVM_NESTED_CTL_NP_ENABLE;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001450 clr_intercept(svm, INTERCEPT_INVLPG);
Joerg Roedel18c918c2010-11-30 18:03:59 +01001451 clr_exception_intercept(svm, PF_VECTOR);
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01001452 clr_cr_intercept(svm, INTERCEPT_CR3_READ);
1453 clr_cr_intercept(svm, INTERCEPT_CR3_WRITE);
Radim Krčmář74545702015-04-27 15:11:25 +02001454 save->g_pat = svm->vcpu.arch.pat;
Joerg Roedel709ddeb2008-02-07 13:47:45 +01001455 save->cr3 = 0;
1456 save->cr4 = 0;
1457 }
Joerg Roedelf40f6a42010-12-03 15:25:15 +01001458 svm->asid_generation = 0;
Alexander Graf1371d902008-11-25 20:17:04 +01001459
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02001460 svm->nested.vmcb = 0;
Joerg Roedel2af91942009-08-07 11:49:28 +02001461 svm->vcpu.arch.hflags = 0;
1462
Wanpeng Lib31c1142018-03-12 04:53:04 -07001463 if (boot_cpu_has(X86_FEATURE_PAUSEFILTER) &&
1464 !kvm_pause_in_guest(svm->vcpu.kvm)) {
Mark Langsdorf565d0992009-10-06 14:25:02 -05001465 control->pause_filter_count = 3000;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01001466 set_intercept(svm, INTERCEPT_PAUSE);
Mark Langsdorf565d0992009-10-06 14:25:02 -05001467 }
1468
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05001469 if (kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001470 avic_init_vmcb(svm);
1471
Janakarajan Natarajan89c8a492017-07-06 15:50:47 -05001472 /*
1473	 * If the hardware supports Virtual VMLOAD/VMSAVE then enable it
1474	 * in the VMCB and clear the intercepts to avoid #VMEXITs.
1475 */
1476 if (vls) {
1477 clr_intercept(svm, INTERCEPT_VMLOAD);
1478 clr_intercept(svm, INTERCEPT_VMSAVE);
1479 svm->vmcb->control.virt_ext |= VIRTUAL_VMLOAD_VMSAVE_ENABLE_MASK;
1480 }
1481
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05001482 if (vgif) {
1483 clr_intercept(svm, INTERCEPT_STGI);
1484 clr_intercept(svm, INTERCEPT_CLGI);
1485 svm->vmcb->control.int_ctl |= V_GIF_ENABLE_MASK;
1486 }
1487
Brijesh Singh35c6f6492017-12-04 10:57:39 -06001488 if (sev_guest(svm->vcpu.kvm)) {
Brijesh Singh1654efc2017-12-04 10:57:34 -06001489 svm->vmcb->control.nested_ctl |= SVM_NESTED_CTL_SEV_ENABLE;
Brijesh Singh35c6f6492017-12-04 10:57:39 -06001490 clr_exception_intercept(svm, UD_VECTOR);
1491 }
Brijesh Singh1654efc2017-12-04 10:57:34 -06001492
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01001493 mark_all_dirty(svm->vmcb);
1494
Joerg Roedel2af91942009-08-07 11:49:28 +02001495 enable_gif(svm);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001496
1497}
1498
Dan Carpenterd3e7dec2017-05-18 10:38:53 +03001499static u64 *avic_get_physical_id_entry(struct kvm_vcpu *vcpu,
1500 unsigned int index)
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001501{
1502 u64 *avic_physical_id_table;
1503 struct kvm_arch *vm_data = &vcpu->kvm->arch;
1504
1505 if (index >= AVIC_MAX_PHYSICAL_ID_COUNT)
1506 return NULL;
1507
1508 avic_physical_id_table = page_address(vm_data->avic_physical_id_table_page);
1509
1510 return &avic_physical_id_table[index];
1511}
1512
1513/*
1514 * Note:
1515 * AVIC hardware walks the nested page table to check permissions,
1516 * but does not use the SPA address specified in the leaf page
1517 * table entry since it uses the address in the AVIC_BACKING_PAGE pointer
1518 * field of the VMCB. Therefore, we set up the
1519 * APIC_ACCESS_PAGE_PRIVATE_MEMSLOT (4KB) here.
1520 */
1521static int avic_init_access_page(struct kvm_vcpu *vcpu)
1522{
1523 struct kvm *kvm = vcpu->kvm;
1524 int ret;
1525
1526 if (kvm->arch.apic_access_page_done)
1527 return 0;
1528
1529 ret = x86_set_memory_region(kvm,
1530 APIC_ACCESS_PAGE_PRIVATE_MEMSLOT,
1531 APIC_DEFAULT_PHYS_BASE,
1532 PAGE_SIZE);
1533 if (ret)
1534 return ret;
1535
1536 kvm->arch.apic_access_page_done = true;
1537 return 0;
1538}
1539
1540static int avic_init_backing_page(struct kvm_vcpu *vcpu)
1541{
1542 int ret;
1543 u64 *entry, new_entry;
1544 int id = vcpu->vcpu_id;
1545 struct vcpu_svm *svm = to_svm(vcpu);
1546
1547 ret = avic_init_access_page(vcpu);
1548 if (ret)
1549 return ret;
1550
1551 if (id >= AVIC_MAX_PHYSICAL_ID_COUNT)
1552 return -EINVAL;
1553
1554 if (!svm->vcpu.arch.apic->regs)
1555 return -EINVAL;
1556
1557 svm->avic_backing_page = virt_to_page(svm->vcpu.arch.apic->regs);
1558
1559	 /* Set the AVIC backing page address in the physical APIC ID table */
1560 entry = avic_get_physical_id_entry(vcpu, id);
1561 if (!entry)
1562 return -EINVAL;
1563
1564 new_entry = READ_ONCE(*entry);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05001565 new_entry = __sme_set((page_to_phys(svm->avic_backing_page) &
1566 AVIC_PHYSICAL_ID_ENTRY_BACKING_PAGE_MASK) |
1567 AVIC_PHYSICAL_ID_ENTRY_VALID_MASK);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001568 WRITE_ONCE(*entry, new_entry);
1569
1570 svm->avic_physical_id_cache = entry;
1571
1572 return 0;
1573}
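/*
 * Each 64-bit entry in the physical APIC ID table packs the SPA of the
 * vAPIC backing page, a valid bit, and - once the vcpu is loaded - the
 * host physical APIC ID plus the is_running bit.  Caching a pointer to
 * the entry in svm->avic_physical_id_cache lets avic_vcpu_load() and
 * avic_vcpu_put() update it without walking the table again.
 */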
1574
Brijesh Singh1654efc2017-12-04 10:57:34 -06001575static void __sev_asid_free(int asid)
1576{
Brijesh Singh70cd94e2017-12-04 10:57:34 -06001577 struct svm_cpu_data *sd;
1578 int cpu, pos;
Brijesh Singh1654efc2017-12-04 10:57:34 -06001579
1580 pos = asid - 1;
1581 clear_bit(pos, sev_asid_bitmap);
Brijesh Singh70cd94e2017-12-04 10:57:34 -06001582
1583 for_each_possible_cpu(cpu) {
1584 sd = per_cpu(svm_data, cpu);
1585 sd->sev_vmcbs[pos] = NULL;
1586 }
Brijesh Singh1654efc2017-12-04 10:57:34 -06001587}
1588
1589static void sev_asid_free(struct kvm *kvm)
1590{
1591 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1592
1593 __sev_asid_free(sev->asid);
1594}
1595
Brijesh Singh59414c92017-12-04 10:57:35 -06001596static void sev_unbind_asid(struct kvm *kvm, unsigned int handle)
1597{
1598 struct sev_data_decommission *decommission;
1599 struct sev_data_deactivate *data;
1600
1601 if (!handle)
1602 return;
1603
1604 data = kzalloc(sizeof(*data), GFP_KERNEL);
1605 if (!data)
1606 return;
1607
1608 /* deactivate handle */
1609 data->handle = handle;
1610 sev_guest_deactivate(data, NULL);
1611
1612 wbinvd_on_all_cpus();
1613 sev_guest_df_flush(NULL);
1614 kfree(data);
1615
1616 decommission = kzalloc(sizeof(*decommission), GFP_KERNEL);
1617 if (!decommission)
1618 return;
1619
1620 /* decommission handle */
1621 decommission->handle = handle;
1622 sev_guest_decommission(decommission, NULL);
1623
1624 kfree(decommission);
1625}
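/*
 * The ordering above mirrors the SEV firmware model: DEACTIVATE
 * detaches the ASID from the guest handle, WBINVD plus DF_FLUSH must
 * complete before that ASID may be reused, and DECOMMISSION finally
 * releases the firmware context bound to the handle.
 */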
1626
Brijesh Singh89c50582017-12-04 10:57:35 -06001627static struct page **sev_pin_memory(struct kvm *kvm, unsigned long uaddr,
1628 unsigned long ulen, unsigned long *n,
1629 int write)
1630{
1631 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1632 unsigned long npages, npinned, size;
1633 unsigned long locked, lock_limit;
1634 struct page **pages;
1635 int first, last;
1636
1637 /* Calculate number of pages. */
1638 first = (uaddr & PAGE_MASK) >> PAGE_SHIFT;
1639 last = ((uaddr + ulen - 1) & PAGE_MASK) >> PAGE_SHIFT;
1640 npages = (last - first + 1);
1641
1642 locked = sev->pages_locked + npages;
1643 lock_limit = rlimit(RLIMIT_MEMLOCK) >> PAGE_SHIFT;
1644 if (locked > lock_limit && !capable(CAP_IPC_LOCK)) {
1645 pr_err("SEV: %lu locked pages exceed the lock limit of %lu.\n", locked, lock_limit);
1646 return NULL;
1647 }
1648
1649 /* Avoid using vmalloc for smaller buffers. */
1650 size = npages * sizeof(struct page *);
1651 if (size > PAGE_SIZE)
1652 pages = vmalloc(size);
1653 else
1654 pages = kmalloc(size, GFP_KERNEL);
1655
1656 if (!pages)
1657 return NULL;
1658
1659 /* Pin the user virtual address. */
1660 npinned = get_user_pages_fast(uaddr, npages, write ? FOLL_WRITE : 0, pages);
1661 if (npinned != npages) {
1662 pr_err("SEV: Failure locking %lu pages.\n", npages);
1663 goto err;
1664 }
1665
1666 *n = npages;
1667 sev->pages_locked = locked;
1668
1669 return pages;
1670
1671err:
1672 if (npinned > 0)
1673 release_pages(pages, npinned);
1674
1675 kvfree(pages);
1676 return NULL;
1677}
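/*
 * Example of the page-count math above: uaddr == 0x12345 and
 * ulen == 0x3000 give first == 0x12 and last == 0x15, so npages == 4 -
 * the range straddles four distinct pages even though it is only three
 * pages long in bytes.
 */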
1678
1679static void sev_unpin_memory(struct kvm *kvm, struct page **pages,
1680 unsigned long npages)
1681{
1682 struct kvm_sev_info *sev = &kvm->arch.sev_info;
1683
1684 release_pages(pages, npages);
1685 kvfree(pages);
1686 sev->pages_locked -= npages;
1687}
1688
1689static void sev_clflush_pages(struct page *pages[], unsigned long npages)
1690{
1691 uint8_t *page_virtual;
1692 unsigned long i;
1693
1694 if (npages == 0 || pages == NULL)
1695 return;
1696
1697 for (i = 0; i < npages; i++) {
1698 page_virtual = kmap_atomic(pages[i]);
1699 clflush_cache_range(page_virtual, PAGE_SIZE);
1700 kunmap_atomic(page_virtual);
1701 }
1702}
1703
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06001704static void __unregister_enc_region_locked(struct kvm *kvm,
1705 struct enc_region *region)
1706{
1707 /*
1708 * The guest may change the memory encryption attribute from C=0 -> C=1
1709	 * or vice versa for this memory range. Let's make sure caches are
1710	 * flushed so that guest data gets written into memory with the
1711	 * correct C-bit.
1712 */
1713 sev_clflush_pages(region->pages, region->npages);
1714
1715 sev_unpin_memory(kvm, region->pages, region->npages);
1716 list_del(&region->list);
1717 kfree(region);
1718}
1719
Brijesh Singh1654efc2017-12-04 10:57:34 -06001720static void sev_vm_destroy(struct kvm *kvm)
1721{
Brijesh Singh59414c92017-12-04 10:57:35 -06001722 struct kvm_sev_info *sev = &kvm->arch.sev_info;
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06001723 struct list_head *head = &sev->regions_list;
1724 struct list_head *pos, *q;
Brijesh Singh59414c92017-12-04 10:57:35 -06001725
Brijesh Singh1654efc2017-12-04 10:57:34 -06001726 if (!sev_guest(kvm))
1727 return;
1728
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06001729 mutex_lock(&kvm->lock);
1730
1731 /*
1732	 * If userspace was terminated before unregistering the memory regions,
1733	 * unpin all of the registered memory.
1734 */
1735 if (!list_empty(head)) {
1736 list_for_each_safe(pos, q, head) {
1737 __unregister_enc_region_locked(kvm,
1738 list_entry(pos, struct enc_region, list));
1739 }
1740 }
1741
1742 mutex_unlock(&kvm->lock);
1743
Brijesh Singh59414c92017-12-04 10:57:35 -06001744 sev_unbind_asid(kvm, sev->handle);
Brijesh Singh1654efc2017-12-04 10:57:34 -06001745 sev_asid_free(kvm);
1746}
1747
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001748static void avic_vm_destroy(struct kvm *kvm)
1749{
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001750 unsigned long flags;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001751 struct kvm_arch *vm_data = &kvm->arch;
1752
Dmitry Vyukov3863dff2017-01-24 14:06:48 +01001753 if (!avic)
1754 return;
1755
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001756 if (vm_data->avic_logical_id_table_page)
1757 __free_page(vm_data->avic_logical_id_table_page);
1758 if (vm_data->avic_physical_id_table_page)
1759 __free_page(vm_data->avic_physical_id_table_page);
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001760
1761 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
1762 hash_del(&vm_data->hnode);
1763 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001764}
1765
Brijesh Singh1654efc2017-12-04 10:57:34 -06001766static void svm_vm_destroy(struct kvm *kvm)
1767{
1768 avic_vm_destroy(kvm);
1769 sev_vm_destroy(kvm);
1770}
1771
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001772static int avic_vm_init(struct kvm *kvm)
1773{
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001774 unsigned long flags;
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001775 int err = -ENOMEM;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001776 struct kvm_arch *vm_data = &kvm->arch;
1777 struct page *p_page;
1778 struct page *l_page;
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001779 struct kvm_arch *ka;
1780 u32 vm_id;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001781
1782 if (!avic)
1783 return 0;
1784
1785 /* Allocating physical APIC ID table (4KB) */
1786 p_page = alloc_page(GFP_KERNEL);
1787 if (!p_page)
1788 goto free_avic;
1789
1790 vm_data->avic_physical_id_table_page = p_page;
1791 clear_page(page_address(p_page));
1792
1793 /* Allocating logical APIC ID table (4KB) */
1794 l_page = alloc_page(GFP_KERNEL);
1795 if (!l_page)
1796 goto free_avic;
1797
1798 vm_data->avic_logical_id_table_page = l_page;
1799 clear_page(page_address(l_page));
1800
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001801 spin_lock_irqsave(&svm_vm_data_hash_lock, flags);
Denys Vlasenko3f0d4db2017-08-11 22:11:58 +02001802 again:
1803 vm_id = next_vm_id = (next_vm_id + 1) & AVIC_VM_ID_MASK;
1804 if (vm_id == 0) { /* id is 1-based, zero is not okay */
1805 next_vm_id_wrapped = 1;
1806 goto again;
1807 }
1808 /* Is it still in use? Only possible if wrapped at least once */
1809 if (next_vm_id_wrapped) {
1810 hash_for_each_possible(svm_vm_data_hash, ka, hnode, vm_id) {
1811 struct kvm *k2 = container_of(ka, struct kvm, arch);
1812 struct kvm_arch *vd2 = &k2->arch;
1813 if (vd2->avic_vm_id == vm_id)
1814 goto again;
1815 }
1816 }
1817 vm_data->avic_vm_id = vm_id;
Suravee Suthikulpanit5881f732016-08-23 13:52:42 -05001818 hash_add(svm_vm_data_hash, &vm_data->hnode, vm_data->avic_vm_id);
1819 spin_unlock_irqrestore(&svm_vm_data_hash_lock, flags);
1820
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001821 return 0;
1822
1823free_avic:
1824 avic_vm_destroy(kvm);
1825 return err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001826}
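/*
 * AVIC VM ids are 1-based and handed out from a bare incrementing
 * counter; only after the id space bounded by AVIC_VM_ID_MASK has
 * wrapped does the allocator pay for the hash-table scan above to
 * reject ids still owned by live VMs.
 */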
1827
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001828static inline int
1829avic_update_iommu_vcpu_affinity(struct kvm_vcpu *vcpu, int cpu, bool r)
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001830{
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001831 int ret = 0;
1832 unsigned long flags;
1833 struct amd_svm_iommu_ir *ir;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001834 struct vcpu_svm *svm = to_svm(vcpu);
1835
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001836 if (!kvm_arch_has_assigned_device(vcpu->kvm))
1837 return 0;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001838
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001839 /*
1840 * Here, we go through the per-vcpu ir_list to update all existing
1841	 * interrupt remapping table entries targeting this vcpu.
1842 */
1843 spin_lock_irqsave(&svm->ir_list_lock, flags);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001844
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001845 if (list_empty(&svm->ir_list))
1846 goto out;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001847
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001848 list_for_each_entry(ir, &svm->ir_list, node) {
1849 ret = amd_iommu_update_ga(cpu, r, ir->data);
1850 if (ret)
1851 break;
1852 }
1853out:
1854 spin_unlock_irqrestore(&svm->ir_list_lock, flags);
1855 return ret;
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001856}
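/*
 * Every entry on ir_list describes one IOMMU interrupt-remapping table
 * entry that posts interrupts directly to this vcpu.
 * amd_iommu_update_ga() retargets each of them to the new physical CPU,
 * or marks the vcpu as not running when called with cpu == -1 from the
 * vcpu-put path below.
 */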
1857
1858static void avic_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
1859{
1860 u64 entry;
1861 /* ID = 0xff (broadcast), ID > 0xff (reserved) */
Suravee Suthikulpanit7d669f52016-06-15 17:23:45 -05001862 int h_physical_id = kvm_cpu_get_apicid(cpu);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001863 struct vcpu_svm *svm = to_svm(vcpu);
1864
1865 if (!kvm_vcpu_apicv_active(vcpu))
1866 return;
1867
1868 if (WARN_ON(h_physical_id >= AVIC_MAX_PHYSICAL_ID_COUNT))
1869 return;
1870
1871 entry = READ_ONCE(*(svm->avic_physical_id_cache));
1872 WARN_ON(entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK);
1873
1874 entry &= ~AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK;
1875 entry |= (h_physical_id & AVIC_PHYSICAL_ID_ENTRY_HOST_PHYSICAL_ID_MASK);
1876
1877 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1878 if (svm->avic_is_running)
1879 entry |= AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1880
1881 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001882 avic_update_iommu_vcpu_affinity(vcpu, h_physical_id,
1883 svm->avic_is_running);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001884}
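/*
 * The is_running bit written above is what the doorbell mechanism and
 * the IOMMU consult when another agent targets this vcpu: set means
 * the interrupt is delivered directly into the running guest, clear
 * means the sender falls back to the slow path and the host injects
 * the interrupt later.
 */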
1885
1886static void avic_vcpu_put(struct kvm_vcpu *vcpu)
1887{
1888 u64 entry;
1889 struct vcpu_svm *svm = to_svm(vcpu);
1890
1891 if (!kvm_vcpu_apicv_active(vcpu))
1892 return;
1893
1894 entry = READ_ONCE(*(svm->avic_physical_id_cache));
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001895 if (entry & AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK)
1896 avic_update_iommu_vcpu_affinity(vcpu, -1, 0);
1897
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001898 entry &= ~AVIC_PHYSICAL_ID_ENTRY_IS_RUNNING_MASK;
1899 WRITE_ONCE(*(svm->avic_physical_id_cache), entry);
Avi Kivity6aa8b732006-12-10 02:21:36 -08001900}
1901
Suravee Suthikulpanit411b44b2016-08-23 13:52:43 -05001902/*
1903 * This function is called during VCPU halt/unhalt.
1904 */
1905static void avic_set_running(struct kvm_vcpu *vcpu, bool is_run)
1906{
1907 struct vcpu_svm *svm = to_svm(vcpu);
1908
1909 svm->avic_is_running = is_run;
1910 if (is_run)
1911 avic_vcpu_load(vcpu, vcpu->cpu);
1912 else
1913 avic_vcpu_put(vcpu);
1914}
1915
Nadav Amitd28bc9d2015-04-13 14:34:08 +03001916static void svm_vcpu_reset(struct kvm_vcpu *vcpu, bool init_event)
Avi Kivity04d2cc72007-09-10 18:10:54 +03001917{
1918 struct vcpu_svm *svm = to_svm(vcpu);
Julian Stecklina66f7b722012-12-05 15:26:19 +01001919 u32 dummy;
1920 u32 eax = 1;
Avi Kivity04d2cc72007-09-10 18:10:54 +03001921
Wanpeng Li518e7b92018-02-28 14:03:31 +08001922 vcpu->arch.microcode_version = 0x01000065;
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01001923 svm->spec_ctrl = 0;
1924
Nadav Amitd28bc9d2015-04-13 14:34:08 +03001925 if (!init_event) {
1926 svm->vcpu.arch.apic_base = APIC_DEFAULT_PHYS_BASE |
1927 MSR_IA32_APICBASE_ENABLE;
1928 if (kvm_vcpu_is_reset_bsp(&svm->vcpu))
1929 svm->vcpu.arch.apic_base |= MSR_IA32_APICBASE_BSP;
1930 }
Paolo Bonzini56908912015-10-19 11:30:19 +02001931 init_vmcb(svm);
Avi Kivity70433382007-11-07 12:57:23 +02001932
Yu Zhange911eb32017-08-24 20:27:52 +08001933 kvm_cpuid(vcpu, &eax, &dummy, &dummy, &dummy, true);
Julian Stecklina66f7b722012-12-05 15:26:19 +01001934 kvm_register_write(vcpu, VCPU_REGS_RDX, eax);
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001935
1936 if (kvm_vcpu_apicv_active(vcpu) && !init_event)
1937 avic_update_vapic_bar(svm, APIC_DEFAULT_PHYS_BASE);
Avi Kivity04d2cc72007-09-10 18:10:54 +03001938}
1939
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001940static int avic_init_vcpu(struct vcpu_svm *svm)
1941{
1942 int ret;
1943
Suravee Suthikulpanit67034bb2017-09-12 10:42:42 -05001944 if (!kvm_vcpu_apicv_active(&svm->vcpu))
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001945 return 0;
1946
1947 ret = avic_init_backing_page(&svm->vcpu);
1948 if (ret)
1949 return ret;
1950
1951 INIT_LIST_HEAD(&svm->ir_list);
1952 spin_lock_init(&svm->ir_list_lock);
1953
1954 return ret;
1955}
1956
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001957static struct kvm_vcpu *svm_create_vcpu(struct kvm *kvm, unsigned int id)
Avi Kivity6aa8b732006-12-10 02:21:36 -08001958{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04001959 struct vcpu_svm *svm;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001960 struct page *page;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001961 struct page *msrpm_pages;
Alexander Grafb286d5d2008-11-25 20:17:05 +01001962 struct page *hsave_page;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001963 struct page *nested_msrpm_pages;
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001964 int err;
Avi Kivity6aa8b732006-12-10 02:21:36 -08001965
Rusty Russellc16f8622007-07-30 21:12:19 +10001966 svm = kmem_cache_zalloc(kvm_vcpu_cache, GFP_KERNEL);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10001967 if (!svm) {
1968 err = -ENOMEM;
1969 goto out;
1970 }
1971
1972 err = kvm_vcpu_init(&svm->vcpu, kvm, id);
1973 if (err)
1974 goto free_svm;
1975
Joerg Roedelf65c2292008-02-13 18:58:46 +01001976 err = -ENOMEM;
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001977 page = alloc_page(GFP_KERNEL);
1978 if (!page)
1979 goto uninit;
1980
Joerg Roedelf65c2292008-02-13 18:58:46 +01001981 msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1982 if (!msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001983 goto free_page1;
Alexander Graf3d6368e2008-11-25 20:17:07 +01001984
1985 nested_msrpm_pages = alloc_pages(GFP_KERNEL, MSRPM_ALLOC_ORDER);
1986 if (!nested_msrpm_pages)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001987 goto free_page2;
Joerg Roedelf65c2292008-02-13 18:58:46 +01001988
Alexander Grafb286d5d2008-11-25 20:17:05 +01001989 hsave_page = alloc_page(GFP_KERNEL);
1990 if (!hsave_page)
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09001991 goto free_page3;
1992
Suravee Suthikulpanitdfa20092017-09-12 10:42:40 -05001993 err = avic_init_vcpu(svm);
1994 if (err)
1995 goto free_page4;
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05001996
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05001997	/* We initialize this flag to true so that the is_running
 1998	 * bit is set the first time the vcpu is loaded.
1999 */
2000 svm->avic_is_running = true;
2001
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002002 svm->nested.hsave = page_address(hsave_page);
Alexander Grafb286d5d2008-11-25 20:17:05 +01002003
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09002004 svm->msrpm = page_address(msrpm_pages);
2005 svm_vcpu_init_msrpm(svm->msrpm);
2006
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002007 svm->nested.msrpm = page_address(nested_msrpm_pages);
Joerg Roedel323c3d82010-03-01 15:34:37 +01002008 svm_vcpu_init_msrpm(svm->nested.msrpm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01002009
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002010 svm->vmcb = page_address(page);
2011 clear_page(svm->vmcb);
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05002012 svm->vmcb_pa = __sme_set(page_to_pfn(page) << PAGE_SHIFT);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002013 svm->asid_generation = 0;
Paolo Bonzini56908912015-10-19 11:30:19 +02002014 init_vmcb(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002015
Boris Ostrovsky2b036c62012-01-09 14:00:35 -05002016 svm_init_osvw(&svm->vcpu);
2017
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002018 return &svm->vcpu;
Avi Kivity36241b82006-12-22 01:05:20 -08002019
Suravee Suthikulpanit44a95da2016-05-04 14:09:46 -05002020free_page4:
2021 __free_page(hsave_page);
Takuya Yoshikawab7af4042010-03-09 14:55:19 +09002022free_page3:
2023 __free_pages(nested_msrpm_pages, MSRPM_ALLOC_ORDER);
2024free_page2:
2025 __free_pages(msrpm_pages, MSRPM_ALLOC_ORDER);
2026free_page1:
2027 __free_page(page);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002028uninit:
2029 kvm_vcpu_uninit(&svm->vcpu);
2030free_svm:
Rusty Russella4770342007-08-01 14:46:11 +10002031 kmem_cache_free(kvm_vcpu_cache, svm);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002032out:
2033 return ERR_PTR(err);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002034}
2035
2036static void svm_free_vcpu(struct kvm_vcpu *vcpu)
2037{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002038 struct vcpu_svm *svm = to_svm(vcpu);
2039
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05002040 __free_page(pfn_to_page(__sme_clr(svm->vmcb_pa) >> PAGE_SHIFT));
Joerg Roedelf65c2292008-02-13 18:58:46 +01002041 __free_pages(virt_to_page(svm->msrpm), MSRPM_ALLOC_ORDER);
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02002042 __free_page(virt_to_page(svm->nested.hsave));
2043 __free_pages(virt_to_page(svm->nested.msrpm), MSRPM_ALLOC_ORDER);
Rusty Russellfb3f0f52007-07-27 17:16:56 +10002044 kvm_vcpu_uninit(vcpu);
Rusty Russella4770342007-08-01 14:46:11 +10002045 kmem_cache_free(kvm_vcpu_cache, svm);
Ashok Raj15d45072018-02-01 22:59:43 +01002046 /*
2047 * The vmcb page can be recycled, causing a false negative in
2048 * svm_vcpu_load(). So do a full IBPB now.
2049 */
2050 indirect_branch_prediction_barrier();
Avi Kivity6aa8b732006-12-10 02:21:36 -08002051}
2052
Avi Kivity15ad7142007-07-11 18:17:21 +03002053static void svm_vcpu_load(struct kvm_vcpu *vcpu, int cpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002054{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002055 struct vcpu_svm *svm = to_svm(vcpu);
Ashok Raj15d45072018-02-01 22:59:43 +01002056 struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
Avi Kivity15ad7142007-07-11 18:17:21 +03002057 int i;
Avi Kivity0cc50642007-03-25 12:07:27 +02002058
Avi Kivity0cc50642007-03-25 12:07:27 +02002059 if (unlikely(cpu != vcpu->cpu)) {
Marcelo Tosatti4b656b12009-07-21 12:47:45 -03002060 svm->asid_generation = 0;
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01002061 mark_all_dirty(svm->vmcb);
Avi Kivity0cc50642007-03-25 12:07:27 +02002062 }
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002063
Avi Kivity82ca2d12010-10-21 12:20:34 +02002064#ifdef CONFIG_X86_64
2065 rdmsrl(MSR_GS_BASE, to_svm(vcpu)->host.gs_base);
2066#endif
Avi Kivitydacccfd2010-10-21 12:20:33 +02002067 savesegment(fs, svm->host.fs);
2068 savesegment(gs, svm->host.gs);
2069 svm->host.ldt = kvm_read_ldt();
2070
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002071 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002072 rdmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Joerg Roedelfbc0db72011-03-25 09:44:46 +01002073
Haozhong Zhangad7218832015-10-20 15:39:02 +08002074 if (static_cpu_has(X86_FEATURE_TSCRATEMSR)) {
2075 u64 tsc_ratio = vcpu->arch.tsc_scaling_ratio;
2076 if (tsc_ratio != __this_cpu_read(current_tsc_ratio)) {
2077 __this_cpu_write(current_tsc_ratio, tsc_ratio);
2078 wrmsrl(MSR_AMD64_TSC_RATIO, tsc_ratio);
2079 }
Joerg Roedelfbc0db72011-03-25 09:44:46 +01002080 }
Paolo Bonzini46896c72015-11-12 14:49:16 +01002081 /* This assumes that the kernel never uses MSR_TSC_AUX */
2082 if (static_cpu_has(X86_FEATURE_RDTSCP))
2083 wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002084
Ashok Raj15d45072018-02-01 22:59:43 +01002085 if (sd->current_vmcb != svm->vmcb) {
2086 sd->current_vmcb = svm->vmcb;
2087 indirect_branch_prediction_barrier();
2088 }
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002089 avic_vcpu_load(vcpu, cpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002090}
2091
2092static void svm_vcpu_put(struct kvm_vcpu *vcpu)
2093{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002094 struct vcpu_svm *svm = to_svm(vcpu);
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002095 int i;
2096
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002097 avic_vcpu_put(vcpu);
2098
Avi Kivitye1beb1d2007-11-18 13:50:24 +02002099 ++vcpu->stat.host_state_reload;
Avi Kivitydacccfd2010-10-21 12:20:33 +02002100 kvm_load_ldt(svm->host.ldt);
2101#ifdef CONFIG_X86_64
2102 loadsegment(fs, svm->host.fs);
Andy Lutomirski296f7812016-04-26 12:23:29 -07002103 wrmsrl(MSR_KERNEL_GS_BASE, current->thread.gsbase);
Joerg Roedel893a5ab2011-01-14 16:45:01 +01002104 load_gs_index(svm->host.gs);
Avi Kivitydacccfd2010-10-21 12:20:33 +02002105#else
Avi Kivity831ca602011-03-08 16:09:51 +02002106#ifdef CONFIG_X86_32_LAZY_GS
Avi Kivitydacccfd2010-10-21 12:20:33 +02002107 loadsegment(gs, svm->host.gs);
2108#endif
Avi Kivity831ca602011-03-08 16:09:51 +02002109#endif
Anthony Liguori94dfbdb2007-04-29 11:56:06 +03002110 for (i = 0; i < NR_HOST_SAVE_USER_MSRS; i++)
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002111 wrmsrl(host_save_user_msrs[i], svm->host_user_msrs[i]);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002112}
2113
Suravee Suthikulpanit8221c132016-05-04 14:09:52 -05002114static void svm_vcpu_blocking(struct kvm_vcpu *vcpu)
2115{
2116 avic_set_running(vcpu, false);
2117}
2118
2119static void svm_vcpu_unblocking(struct kvm_vcpu *vcpu)
2120{
2121 avic_set_running(vcpu, true);
2122}
2123
Avi Kivity6aa8b732006-12-10 02:21:36 -08002124static unsigned long svm_get_rflags(struct kvm_vcpu *vcpu)
2125{
Ladi Prosek9b611742017-06-21 09:06:59 +02002126 struct vcpu_svm *svm = to_svm(vcpu);
2127 unsigned long rflags = svm->vmcb->save.rflags;
2128
2129 if (svm->nmi_singlestep) {
2130 /* Hide our flags if they were not set by the guest */
2131 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF))
2132 rflags &= ~X86_EFLAGS_TF;
2133 if (!(svm->nmi_singlestep_guest_rflags & X86_EFLAGS_RF))
2134 rflags &= ~X86_EFLAGS_RF;
2135 }
2136 return rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002137}
2138
2139static void svm_set_rflags(struct kvm_vcpu *vcpu, unsigned long rflags)
2140{
Ladi Prosek9b611742017-06-21 09:06:59 +02002141 if (to_svm(vcpu)->nmi_singlestep)
2142 rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
2143
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002144 /*
Andrea Gelminibb3541f2016-05-21 14:14:44 +02002145 * Any change of EFLAGS.VM is accompanied by a reload of SS
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002146 * (caused by either a task switch or an inter-privilege IRET),
2147 * so we do not need to update the CPL here.
2148 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002149 to_svm(vcpu)->vmcb->save.rflags = rflags;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002150}
2151
Avi Kivity6de4f3a2009-05-31 22:58:47 +03002152static void svm_cache_reg(struct kvm_vcpu *vcpu, enum kvm_reg reg)
2153{
2154 switch (reg) {
2155 case VCPU_EXREG_PDPTR:
2156 BUG_ON(!npt_enabled);
Avi Kivity9f8fe502010-12-05 17:30:00 +02002157 load_pdptrs(vcpu, vcpu->arch.walk_mmu, kvm_read_cr3(vcpu));
Avi Kivity6de4f3a2009-05-31 22:58:47 +03002158 break;
2159 default:
2160 BUG();
2161 }
2162}
2163
Alexander Graff0b85052008-11-25 20:17:01 +01002164static void svm_set_vintr(struct vcpu_svm *svm)
2165{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002166 set_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01002167}
2168
2169static void svm_clear_vintr(struct vcpu_svm *svm)
2170{
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01002171 clr_intercept(svm, INTERCEPT_VINTR);
Alexander Graff0b85052008-11-25 20:17:01 +01002172}
2173
Avi Kivity6aa8b732006-12-10 02:21:36 -08002174static struct vmcb_seg *svm_seg(struct kvm_vcpu *vcpu, int seg)
2175{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002176 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002177
2178 switch (seg) {
2179 case VCPU_SREG_CS: return &save->cs;
2180 case VCPU_SREG_DS: return &save->ds;
2181 case VCPU_SREG_ES: return &save->es;
2182 case VCPU_SREG_FS: return &save->fs;
2183 case VCPU_SREG_GS: return &save->gs;
2184 case VCPU_SREG_SS: return &save->ss;
2185 case VCPU_SREG_TR: return &save->tr;
2186 case VCPU_SREG_LDTR: return &save->ldtr;
2187 }
2188 BUG();
Al Viro8b6d44c2007-02-09 16:38:40 +00002189 return NULL;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002190}
2191
2192static u64 svm_get_segment_base(struct kvm_vcpu *vcpu, int seg)
2193{
2194 struct vmcb_seg *s = svm_seg(vcpu, seg);
2195
2196 return s->base;
2197}
2198
2199static void svm_get_segment(struct kvm_vcpu *vcpu,
2200 struct kvm_segment *var, int seg)
2201{
2202 struct vmcb_seg *s = svm_seg(vcpu, seg);
2203
2204 var->base = s->base;
2205 var->limit = s->limit;
2206 var->selector = s->selector;
2207 var->type = s->attrib & SVM_SELECTOR_TYPE_MASK;
2208 var->s = (s->attrib >> SVM_SELECTOR_S_SHIFT) & 1;
2209 var->dpl = (s->attrib >> SVM_SELECTOR_DPL_SHIFT) & 3;
2210 var->present = (s->attrib >> SVM_SELECTOR_P_SHIFT) & 1;
2211 var->avl = (s->attrib >> SVM_SELECTOR_AVL_SHIFT) & 1;
2212 var->l = (s->attrib >> SVM_SELECTOR_L_SHIFT) & 1;
2213 var->db = (s->attrib >> SVM_SELECTOR_DB_SHIFT) & 1;
Jim Mattson80112c82014-07-08 09:47:41 +05302214
2215 /*
2216 * AMD CPUs circa 2014 track the G bit for all segments except CS.
2217 * However, the SVM spec states that the G bit is not observed by the
2218 * CPU, and some VMware virtual CPUs drop the G bit for all segments.
2219 * So let's synthesize a legal G bit for all segments, this helps
2220 * running KVM nested. It also helps cross-vendor migration, because
2221 * Intel's vmentry has a check on the 'G' bit.
2222 */
2223 var->g = s->limit > 0xfffff;
Amit Shah25022ac2008-10-27 09:04:17 +00002224
Joerg Roedele0231712010-02-24 18:59:10 +01002225 /*
2226 * AMD's VMCB does not have an explicit unusable field, so emulate it
Andre Przywara19bca6a2009-04-28 12:45:30 +02002227	 * for cross-vendor migration purposes by marking it "not present".
2228 */
Gioh Kim8eae9572017-05-30 15:24:45 +02002229 var->unusable = !var->present;
Andre Przywara19bca6a2009-04-28 12:45:30 +02002230
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002231 switch (seg) {
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002232 case VCPU_SREG_TR:
2233 /*
2234 * Work around a bug where the busy flag in the tr selector
2235 * isn't exposed
2236 */
Amit Shahc0d09822008-10-27 09:04:18 +00002237 var->type |= 0x2;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002238 break;
2239 case VCPU_SREG_DS:
2240 case VCPU_SREG_ES:
2241 case VCPU_SREG_FS:
2242 case VCPU_SREG_GS:
2243 /*
2244 * The accessed bit must always be set in the segment
2245 * descriptor cache, although it can be cleared in the
2246 * descriptor, the cached bit always remains at 1. Since
2247 * Intel has a check on this, set it here to support
2248 * cross-vendor migration.
2249 */
2250 if (!var->unusable)
2251 var->type |= 0x1;
2252 break;
Andre Przywarab586eb02009-04-28 12:45:43 +02002253 case VCPU_SREG_SS:
Joerg Roedele0231712010-02-24 18:59:10 +01002254 /*
2255 * On AMD CPUs sometimes the DB bit in the segment
Andre Przywarab586eb02009-04-28 12:45:43 +02002256 * descriptor is left as 1, although the whole segment has
2257 * been made unusable. Clear it here to pass an Intel VMX
2258 * entry check when cross vendor migrating.
2259 */
2260 if (var->unusable)
2261 var->db = 0;
Roman Pend9c1b542017-06-01 10:55:03 +02002262 /* This is symmetric with svm_set_segment() */
Jan Kiszka33b458d2014-06-29 17:12:43 +02002263 var->dpl = to_svm(vcpu)->vmcb->save.cpl;
Andre Przywarab586eb02009-04-28 12:45:43 +02002264 break;
Andre Przywara1fbdc7a2009-01-11 22:39:44 +01002265 }
Avi Kivity6aa8b732006-12-10 02:21:36 -08002266}
2267
Izik Eidus2e4d2652008-03-24 19:38:34 +02002268static int svm_get_cpl(struct kvm_vcpu *vcpu)
2269{
2270 struct vmcb_save_area *save = &to_svm(vcpu)->vmcb->save;
2271
2272 return save->cpl;
2273}
2274
Gleb Natapov89a27f42010-02-16 10:51:48 +02002275static void svm_get_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002276{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002277 struct vcpu_svm *svm = to_svm(vcpu);
2278
Gleb Natapov89a27f42010-02-16 10:51:48 +02002279 dt->size = svm->vmcb->save.idtr.limit;
2280 dt->address = svm->vmcb->save.idtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002281}
2282
Gleb Natapov89a27f42010-02-16 10:51:48 +02002283static void svm_set_idt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002284{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002285 struct vcpu_svm *svm = to_svm(vcpu);
2286
Gleb Natapov89a27f42010-02-16 10:51:48 +02002287 svm->vmcb->save.idtr.limit = dt->size;
2288	 svm->vmcb->save.idtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01002289 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002290}
2291
Gleb Natapov89a27f42010-02-16 10:51:48 +02002292static void svm_get_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002293{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002294 struct vcpu_svm *svm = to_svm(vcpu);
2295
Gleb Natapov89a27f42010-02-16 10:51:48 +02002296 dt->size = svm->vmcb->save.gdtr.limit;
2297 dt->address = svm->vmcb->save.gdtr.base;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002298}
2299
Gleb Natapov89a27f42010-02-16 10:51:48 +02002300static void svm_set_gdt(struct kvm_vcpu *vcpu, struct desc_ptr *dt)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002301{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002302 struct vcpu_svm *svm = to_svm(vcpu);
2303
Gleb Natapov89a27f42010-02-16 10:51:48 +02002304 svm->vmcb->save.gdtr.limit = dt->size;
2305	 svm->vmcb->save.gdtr.base = dt->address;
Joerg Roedel17a703c2010-12-03 11:45:56 +01002306 mark_dirty(svm->vmcb, VMCB_DT);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002307}
2308
Avi Kivitye8467fd2009-12-29 18:43:06 +02002309static void svm_decache_cr0_guest_bits(struct kvm_vcpu *vcpu)
2310{
2311}
2312
Avi Kivityaff48ba2010-12-05 18:56:11 +02002313static void svm_decache_cr3(struct kvm_vcpu *vcpu)
2314{
2315}
2316
Anthony Liguori25c4c272007-04-27 09:29:21 +03002317static void svm_decache_cr4_guest_bits(struct kvm_vcpu *vcpu)
Avi Kivity399badf2007-01-05 16:36:38 -08002318{
2319}
2320
Avi Kivityd2251572010-01-06 10:55:27 +02002321static void update_cr0_intercept(struct vcpu_svm *svm)
2322{
2323 ulong gcr0 = svm->vcpu.arch.cr0;
2324 u64 *hcr0 = &svm->vmcb->save.cr0;
2325
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08002326 *hcr0 = (*hcr0 & ~SVM_CR0_SELECTIVE_MASK)
2327 | (gcr0 & SVM_CR0_SELECTIVE_MASK);
Avi Kivityd2251572010-01-06 10:55:27 +02002328
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002329 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02002330
Paolo Bonzinibd7e5b02017-02-03 21:18:52 -08002331 if (gcr0 == *hcr0) {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002332 clr_cr_intercept(svm, INTERCEPT_CR0_READ);
2333 clr_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02002334 } else {
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01002335 set_cr_intercept(svm, INTERCEPT_CR0_READ);
2336 set_cr_intercept(svm, INTERCEPT_CR0_WRITE);
Avi Kivityd2251572010-01-06 10:55:27 +02002337 }
2338}
2339
Avi Kivity6aa8b732006-12-10 02:21:36 -08002340static void svm_set_cr0(struct kvm_vcpu *vcpu, unsigned long cr0)
2341{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002342 struct vcpu_svm *svm = to_svm(vcpu);
2343
Avi Kivity05b3e0c2006-12-13 00:33:45 -08002344#ifdef CONFIG_X86_64
Avi Kivityf6801df2010-01-21 15:31:50 +02002345 if (vcpu->arch.efer & EFER_LME) {
Rusty Russell707d92fa2007-07-17 23:19:08 +10002346 if (!is_paging(vcpu) && (cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02002347 vcpu->arch.efer |= EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06002348 svm->vmcb->save.efer |= EFER_LMA | EFER_LME;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002349 }
2350
Mike Dayd77c26f2007-10-08 09:02:08 -04002351 if (is_paging(vcpu) && !(cr0 & X86_CR0_PG)) {
Avi Kivityf6801df2010-01-21 15:31:50 +02002352 vcpu->arch.efer &= ~EFER_LMA;
Carlo Marcelo Arenas Belon2b5203e2007-12-01 06:17:11 -06002353 svm->vmcb->save.efer &= ~(EFER_LMA | EFER_LME);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002354 }
2355 }
2356#endif
Zhang Xiantaoad312c72007-12-13 23:50:52 +08002357 vcpu->arch.cr0 = cr0;
Avi Kivity888f9f32010-01-10 12:14:04 +02002358
2359 if (!npt_enabled)
2360 cr0 |= X86_CR0_PG | X86_CR0_WP;
Avi Kivity02daab22009-12-30 12:40:26 +02002361
Paolo Bonzinibcf166a2015-10-01 13:19:55 +02002362 /*
2363	 * Re-enable caching here because the QEMU BIOS
2364	 * does not do it; otherwise there is a noticeable
2365	 * delay at reboot.
2366 */
2367 if (kvm_check_has_quirk(vcpu->kvm, KVM_X86_QUIRK_CD_NW_CLEARED))
2368 cr0 &= ~(X86_CR0_CD | X86_CR0_NW);
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002369 svm->vmcb->save.cr0 = cr0;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002370 mark_dirty(svm->vmcb, VMCB_CR);
Avi Kivityd2251572010-01-06 10:55:27 +02002371 update_cr0_intercept(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002372}
2373
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002374static int svm_set_cr4(struct kvm_vcpu *vcpu, unsigned long cr4)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002375{
Andy Lutomirski1e02ce42014-10-24 15:58:08 -07002376 unsigned long host_cr4_mce = cr4_read_shadow() & X86_CR4_MCE;
Joerg Roedele5eab0c2008-09-09 19:11:51 +02002377 unsigned long old_cr4 = to_svm(vcpu)->vmcb->save.cr4;
2378
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002379 if (cr4 & X86_CR4_VMXE)
2380 return 1;
2381
Joerg Roedele5eab0c2008-09-09 19:11:51 +02002382 if (npt_enabled && ((old_cr4 ^ cr4) & X86_CR4_PGE))
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08002383 svm_flush_tlb(vcpu, true);
Joerg Roedel6394b642008-04-09 14:15:29 +02002384
Joerg Roedelec077262008-04-09 14:15:28 +02002385 vcpu->arch.cr4 = cr4;
2386 if (!npt_enabled)
2387 cr4 |= X86_CR4_PAE;
Joerg Roedel6394b642008-04-09 14:15:29 +02002388 cr4 |= host_cr4_mce;
Joerg Roedelec077262008-04-09 14:15:28 +02002389 to_svm(vcpu)->vmcb->save.cr4 = cr4;
Joerg Roedeldcca1a62010-12-03 11:45:54 +01002390 mark_dirty(to_svm(vcpu)->vmcb, VMCB_CR);
Nadav Har'El5e1746d2011-05-25 23:03:24 +03002391 return 0;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002392}
2393
2394static void svm_set_segment(struct kvm_vcpu *vcpu,
2395 struct kvm_segment *var, int seg)
2396{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002397 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002398 struct vmcb_seg *s = svm_seg(vcpu, seg);
2399
2400 s->base = var->base;
2401 s->limit = var->limit;
2402 s->selector = var->selector;
Roman Pend9c1b542017-06-01 10:55:03 +02002403 s->attrib = (var->type & SVM_SELECTOR_TYPE_MASK);
2404 s->attrib |= (var->s & 1) << SVM_SELECTOR_S_SHIFT;
2405 s->attrib |= (var->dpl & 3) << SVM_SELECTOR_DPL_SHIFT;
2406 s->attrib |= ((var->present & 1) && !var->unusable) << SVM_SELECTOR_P_SHIFT;
2407 s->attrib |= (var->avl & 1) << SVM_SELECTOR_AVL_SHIFT;
2408 s->attrib |= (var->l & 1) << SVM_SELECTOR_L_SHIFT;
2409 s->attrib |= (var->db & 1) << SVM_SELECTOR_DB_SHIFT;
2410 s->attrib |= (var->g & 1) << SVM_SELECTOR_G_SHIFT;
Paolo Bonziniae9fedc2014-05-14 09:39:49 +02002411
2412 /*
2413 * This is always accurate, except if SYSRET returned to a segment
2414 * with SS.DPL != 3. Intel does not have this quirk, and always
2415 * forces SS.DPL to 3 on sysret, so we ignore that case; fixing it
2416 * would entail passing the CPL to userspace and back.
2417 */
2418 if (seg == VCPU_SREG_SS)
Roman Pend9c1b542017-06-01 10:55:03 +02002419 /* This is symmetric with svm_get_segment() */
2420 svm->vmcb->save.cpl = (var->dpl & 3);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002421
Joerg Roedel060d0c92010-12-03 11:45:57 +01002422 mark_dirty(svm->vmcb, VMCB_SEG);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002423}
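/*
 * The shifts above pack the descriptor attributes the way the VMCB
 * expects them: type/S/DPL/P in the low byte and AVL/L/DB/G in bits
 * 8-11, i.e. the 16-bit GDT attribute bits with the limit nibble
 * squeezed out.  svm_get_segment() performs the exact inverse.
 */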
2424
Paolo Bonzinicbdb9672015-11-10 09:14:39 +01002425static void update_bp_intercept(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002426{
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002427 struct vcpu_svm *svm = to_svm(vcpu);
2428
Joerg Roedel18c918c2010-11-30 18:03:59 +01002429 clr_exception_intercept(svm, BP_VECTOR);
Gleb Natapov44c11432009-05-11 13:35:52 +03002430
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002431 if (vcpu->guest_debug & KVM_GUESTDBG_ENABLE) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002432 if (vcpu->guest_debug & KVM_GUESTDBG_USE_SW_BP)
Joerg Roedel18c918c2010-11-30 18:03:59 +01002433 set_exception_intercept(svm, BP_VECTOR);
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002434 } else
2435 vcpu->guest_debug = 0;
Gleb Natapov44c11432009-05-11 13:35:52 +03002436}
2437
Tejun Heo0fe1e002009-10-29 22:34:14 +09002438static void new_asid(struct vcpu_svm *svm, struct svm_cpu_data *sd)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002439{
Tejun Heo0fe1e002009-10-29 22:34:14 +09002440 if (sd->next_asid > sd->max_asid) {
2441 ++sd->asid_generation;
Brijesh Singh4faefff2017-12-04 10:57:25 -06002442 sd->next_asid = sd->min_asid;
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002443 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ALL_ASID;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002444 }
2445
Tejun Heo0fe1e002009-10-29 22:34:14 +09002446 svm->asid_generation = sd->asid_generation;
2447 svm->vmcb->control.asid = sd->next_asid++;
Joerg Roedeld48086d2010-12-03 11:45:51 +01002448
2449 mark_dirty(svm->vmcb, VMCB_ASID);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002450}
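/*
 * Example of the generation scheme above: once every ASID between
 * sd->min_asid and sd->max_asid has been handed out, the next request
 * bumps sd->asid_generation, asks VMRUN for a flush-all, and restarts
 * at sd->min_asid.  A vcpu still carrying the old generation is given
 * a fresh ASID before its next VMRUN, so stale translations are never
 * reused.
 */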
2451
Jan Kiszka73aaf249e2014-01-04 18:47:16 +01002452static u64 svm_get_dr6(struct kvm_vcpu *vcpu)
2453{
2454 return to_svm(vcpu)->vmcb->save.dr6;
2455}
2456
2457static void svm_set_dr6(struct kvm_vcpu *vcpu, unsigned long value)
2458{
2459 struct vcpu_svm *svm = to_svm(vcpu);
2460
2461 svm->vmcb->save.dr6 = value;
2462 mark_dirty(svm->vmcb, VMCB_DR);
2463}
2464
Paolo Bonzinifacb0132014-02-21 10:32:27 +01002465static void svm_sync_dirty_debug_regs(struct kvm_vcpu *vcpu)
2466{
2467 struct vcpu_svm *svm = to_svm(vcpu);
2468
2469 get_debugreg(vcpu->arch.db[0], 0);
2470 get_debugreg(vcpu->arch.db[1], 1);
2471 get_debugreg(vcpu->arch.db[2], 2);
2472 get_debugreg(vcpu->arch.db[3], 3);
2473 vcpu->arch.dr6 = svm_get_dr6(vcpu);
2474 vcpu->arch.dr7 = svm->vmcb->save.dr7;
2475
2476 vcpu->arch.switch_db_regs &= ~KVM_DEBUGREG_WONT_EXIT;
2477 set_dr_intercepts(svm);
2478}
2479
Gleb Natapov020df072010-04-13 10:05:23 +03002480static void svm_set_dr7(struct kvm_vcpu *vcpu, unsigned long value)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002481{
Jan Kiszka42dbaa52008-12-15 13:52:10 +01002482 struct vcpu_svm *svm = to_svm(vcpu);
Jan Kiszka42dbaa52008-12-15 13:52:10 +01002483
Gleb Natapov020df072010-04-13 10:05:23 +03002484 svm->vmcb->save.dr7 = value;
Joerg Roedel72214b92010-12-03 11:45:55 +01002485 mark_dirty(svm->vmcb, VMCB_DR);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002486}
2487
Avi Kivity851ba692009-08-24 11:10:17 +03002488static int pf_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002489{
Brijesh Singh0ede79e2017-12-04 10:57:39 -06002490 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002491 u64 error_code = svm->vmcb->control.exit_info_1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002492
Wanpeng Li1261bfa2017-07-13 18:30:40 -07002493 return kvm_handle_page_fault(&svm->vcpu, error_code, fault_address,
Brijesh Singh00b10fe2017-12-04 10:57:40 -06002494 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2495 svm->vmcb->control.insn_bytes : NULL,
Paolo Bonzinid0006532017-08-11 18:36:43 +02002496 svm->vmcb->control.insn_len);
2497}
2498
2499static int npf_interception(struct vcpu_svm *svm)
2500{
Brijesh Singh0ede79e2017-12-04 10:57:39 -06002501 u64 fault_address = __sme_clr(svm->vmcb->control.exit_info_2);
Paolo Bonzinid0006532017-08-11 18:36:43 +02002502 u64 error_code = svm->vmcb->control.exit_info_1;
2503
2504 trace_kvm_page_fault(fault_address, error_code);
2505 return kvm_mmu_page_fault(&svm->vcpu, fault_address, error_code,
Brijesh Singh00b10fe2017-12-04 10:57:40 -06002506 static_cpu_has(X86_FEATURE_DECODEASSISTS) ?
2507 svm->vmcb->control.insn_bytes : NULL,
Paolo Bonzinid0006532017-08-11 18:36:43 +02002508 svm->vmcb->control.insn_len);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002509}
2510
Avi Kivity851ba692009-08-24 11:10:17 +03002511static int db_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002512{
Avi Kivity851ba692009-08-24 11:10:17 +03002513 struct kvm_run *kvm_run = svm->vcpu.run;
2514
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002515 if (!(svm->vcpu.guest_debug &
Gleb Natapov44c11432009-05-11 13:35:52 +03002516 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) &&
Jan Kiszka6be7d302009-10-18 13:24:54 +02002517 !svm->nmi_singlestep) {
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002518 kvm_queue_exception(&svm->vcpu, DB_VECTOR);
2519 return 1;
2520 }
Gleb Natapov44c11432009-05-11 13:35:52 +03002521
Jan Kiszka6be7d302009-10-18 13:24:54 +02002522 if (svm->nmi_singlestep) {
Ladi Prosek4aebd0e2017-06-21 09:06:57 +02002523 disable_nmi_singlestep(svm);
Gleb Natapov44c11432009-05-11 13:35:52 +03002524 }
2525
2526 if (svm->vcpu.guest_debug &
Joerg Roedele0231712010-02-24 18:59:10 +01002527 (KVM_GUESTDBG_SINGLESTEP | KVM_GUESTDBG_USE_HW_BP)) {
Gleb Natapov44c11432009-05-11 13:35:52 +03002528 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2529 kvm_run->debug.arch.pc =
2530 svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2531 kvm_run->debug.arch.exception = DB_VECTOR;
2532 return 0;
2533 }
2534
2535 return 1;
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002536}
2537
Avi Kivity851ba692009-08-24 11:10:17 +03002538static int bp_interception(struct vcpu_svm *svm)
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002539{
Avi Kivity851ba692009-08-24 11:10:17 +03002540 struct kvm_run *kvm_run = svm->vcpu.run;
2541
Jan Kiszkad0bfb942008-12-15 13:52:10 +01002542 kvm_run->exit_reason = KVM_EXIT_DEBUG;
2543 kvm_run->debug.arch.pc = svm->vmcb->save.cs.base + svm->vmcb->save.rip;
2544 kvm_run->debug.arch.exception = BP_VECTOR;
2545 return 0;
2546}
2547
Avi Kivity851ba692009-08-24 11:10:17 +03002548static int ud_interception(struct vcpu_svm *svm)
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002549{
2550 int er;
2551
Andre Przywara51d8b662010-12-21 11:12:02 +01002552 er = emulate_instruction(&svm->vcpu, EMULTYPE_TRAP_UD);
Liran Alon61cb57c2017-11-05 16:56:32 +02002553 if (er == EMULATE_USER_EXIT)
2554 return 0;
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002555 if (er != EMULATE_DONE)
Avi Kivity7ee5d9402007-11-25 15:22:50 +02002556 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
Anthony Liguori7aa81cc2007-09-17 14:57:50 -05002557 return 1;
2558}
2559
Eric Northup54a20552015-11-03 18:03:53 +01002560static int ac_interception(struct vcpu_svm *svm)
2561{
2562 kvm_queue_exception_e(&svm->vcpu, AC_VECTOR, 0);
2563 return 1;
2564}
2565
Liran Alon97184202018-03-12 13:12:52 +02002566static int gp_interception(struct vcpu_svm *svm)
2567{
2568 struct kvm_vcpu *vcpu = &svm->vcpu;
2569 u32 error_code = svm->vmcb->control.exit_info_1;
2570 int er;
2571
2572 WARN_ON_ONCE(!enable_vmware_backdoor);
2573
2574 er = emulate_instruction(vcpu,
2575 EMULTYPE_VMWARE | EMULTYPE_NO_UD_ON_FAIL);
2576 if (er == EMULATE_USER_EXIT)
2577 return 0;
2578 else if (er != EMULATE_DONE)
2579 kvm_queue_exception_e(vcpu, GP_VECTOR, error_code);
2580 return 1;
2581}
2582
Joerg Roedel67ec6602010-05-17 14:43:35 +02002583static bool is_erratum_383(void)
2584{
2585 int err, i;
2586 u64 value;
2587
2588 if (!erratum_383_found)
2589 return false;
2590
2591 value = native_read_msr_safe(MSR_IA32_MC0_STATUS, &err);
2592 if (err)
2593 return false;
2594
2595	 /* Bit 62 may or may not be set for this MCE */
2596 value &= ~(1ULL << 62);
2597
2598 if (value != 0xb600000000010015ULL)
2599 return false;
2600
2601 /* Clear MCi_STATUS registers */
2602 for (i = 0; i < 6; ++i)
2603 native_write_msr_safe(MSR_IA32_MCx_STATUS(i), 0, 0);
2604
2605 value = native_read_msr_safe(MSR_IA32_MCG_STATUS, &err);
2606 if (!err) {
2607 u32 low, high;
2608
2609 value &= ~(1ULL << 2);
2610 low = lower_32_bits(value);
2611 high = upper_32_bits(value);
2612
2613 native_write_msr_safe(MSR_IA32_MCG_STATUS, low, high);
2614 }
2615
2616 /* Flush tlb to evict multi-match entries */
2617 __flush_tlb_all();
2618
2619 return true;
2620}
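/*
 * The magic constant tested above is the MC0_STATUS signature that
 * erratum 383 leaves behind; anything else is treated as a genuine
 * machine check.  Clearing the MCi_STATUS banks and flushing the TLB
 * undo the erratum's side effects before the guest is resumed.
 */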
2621
Joerg Roedelfe5913e2010-05-17 14:43:34 +02002622static void svm_handle_mce(struct vcpu_svm *svm)
Joerg Roedel53371b52008-04-09 14:15:30 +02002623{
Joerg Roedel67ec6602010-05-17 14:43:35 +02002624 if (is_erratum_383()) {
2625 /*
2626 * Erratum 383 triggered. Guest state is corrupt so kill the
2627 * guest.
2628 */
2629 pr_err("KVM: Guest triggered AMD Erratum 383\n");
2630
Avi Kivitya8eeb042010-05-10 12:34:53 +03002631 kvm_make_request(KVM_REQ_TRIPLE_FAULT, &svm->vcpu);
Joerg Roedel67ec6602010-05-17 14:43:35 +02002632
2633 return;
2634 }
2635
Joerg Roedel53371b52008-04-09 14:15:30 +02002636 /*
2637 * On an #MC intercept the MCE handler is not called automatically in
2638 * the host. So do it by hand here.
2639 */
2640 asm volatile (
2641 "int $0x12\n");
2642 /* not sure if we ever come back to this point */
2643
Joerg Roedelfe5913e2010-05-17 14:43:34 +02002644 return;
2645}
2646
2647static int mc_interception(struct vcpu_svm *svm)
2648{
Joerg Roedel53371b52008-04-09 14:15:30 +02002649 return 1;
2650}
2651
Avi Kivity851ba692009-08-24 11:10:17 +03002652static int shutdown_interception(struct vcpu_svm *svm)
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002653{
Avi Kivity851ba692009-08-24 11:10:17 +03002654 struct kvm_run *kvm_run = svm->vcpu.run;
2655
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002656 /*
2657 * VMCB is undefined after a SHUTDOWN intercept
2658 * so reinitialize it.
2659 */
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04002660 clear_page(svm->vmcb);
Paolo Bonzini56908912015-10-19 11:30:19 +02002661 init_vmcb(svm);
Joerg Roedel46fe4dd2007-01-26 00:56:42 -08002662
2663 kvm_run->exit_reason = KVM_EXIT_SHUTDOWN;
2664 return 0;
2665}
2666
Avi Kivity851ba692009-08-24 11:10:17 +03002667static int io_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002668{
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002669 struct kvm_vcpu *vcpu = &svm->vcpu;
Mike Dayd77c26f2007-10-08 09:02:08 -04002670 u32 io_info = svm->vmcb->control.exit_info_1; /* address size bug? */
Sean Christophersondca7f122018-03-08 08:57:27 -08002671 int size, in, string;
Avi Kivity039576c2007-03-20 12:46:50 +02002672 unsigned port;
Avi Kivity6aa8b732006-12-10 02:21:36 -08002673
Rusty Russelle756fc62007-07-30 20:07:08 +10002674 ++svm->vcpu.stat.io_exits;
Laurent Viviere70669a2007-08-05 10:36:40 +03002675 string = (io_info & SVM_IOIO_STR_MASK) != 0;
Avi Kivity039576c2007-03-20 12:46:50 +02002676 in = (io_info & SVM_IOIO_TYPE_MASK) != 0;
Tom Lendacky8370c3d2016-11-23 12:01:50 -05002677 if (string)
Andre Przywara51d8b662010-12-21 11:12:02 +01002678 return emulate_instruction(vcpu, 0) == EMULATE_DONE;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002679
Avi Kivity039576c2007-03-20 12:46:50 +02002680 port = io_info >> 16;
2681 size = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002682 svm->next_rip = svm->vmcb->control.exit_info_2;
Gleb Natapovcf8f70b2010-03-18 15:20:23 +02002683
Sean Christophersondca7f122018-03-08 08:57:27 -08002684 return kvm_fast_pio(&svm->vcpu, size, port, in);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002685}
2686
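/*
 * Editor's sketch (illustrative, not part of the driver): how
 * io_interception() above picks EXITINFO1 apart. The field layout
 * follows the SVM_IOIO_* masks: direction in bit 0, string flag in
 * bit 2, access size in bits 6:4 (encoded directly as 1/2/4 bytes),
 * port number in bits 31:16. The struct and helper names below are
 * invented for illustration only.
 */
struct ioio_decode {
	unsigned int port;	/* I/O port number */
	int size;		/* access width in bytes: 1, 2 or 4 */
	int in;			/* 1 = IN, 0 = OUT */
	int string;		/* 1 = INS/OUTS */
};

static inline struct ioio_decode decode_ioio_exitinfo(u32 io_info)
{
	struct ioio_decode d;

	d.string = (io_info & SVM_IOIO_STR_MASK) != 0;
	d.in     = (io_info & SVM_IOIO_TYPE_MASK) != 0;
	d.size   = (io_info & SVM_IOIO_SIZE_MASK) >> SVM_IOIO_SIZE_SHIFT;
	d.port   = io_info >> 16;

	return d;
}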
Avi Kivity851ba692009-08-24 11:10:17 +03002687static int nmi_interception(struct vcpu_svm *svm)
Joerg Roedelc47f0982008-04-30 17:56:00 +02002688{
2689 return 1;
2690}
2691
Avi Kivity851ba692009-08-24 11:10:17 +03002692static int intr_interception(struct vcpu_svm *svm)
Joerg Roedela0698052008-04-30 17:56:01 +02002693{
2694 ++svm->vcpu.stat.irq_exits;
2695 return 1;
2696}
2697
Avi Kivity851ba692009-08-24 11:10:17 +03002698static int nop_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002699{
2700 return 1;
2701}
2702
Avi Kivity851ba692009-08-24 11:10:17 +03002703static int halt_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08002704{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002705 svm->next_rip = kvm_rip_read(&svm->vcpu) + 1;
Rusty Russelle756fc62007-07-30 20:07:08 +10002706 return kvm_emulate_halt(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08002707}
2708
Avi Kivity851ba692009-08-24 11:10:17 +03002709static int vmmcall_interception(struct vcpu_svm *svm)
Avi Kivity02e235b2007-02-19 14:37:47 +02002710{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03002711 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Andrey Smetanin0d9c0552016-02-11 16:44:59 +03002712 return kvm_emulate_hypercall(&svm->vcpu);
Avi Kivity02e235b2007-02-19 14:37:47 +02002713}
2714
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002715static unsigned long nested_svm_get_tdp_cr3(struct kvm_vcpu *vcpu)
2716{
2717 struct vcpu_svm *svm = to_svm(vcpu);
2718
2719 return svm->nested.nested_cr3;
2720}
2721
Avi Kivitye4e517b2011-07-28 11:36:17 +03002722static u64 nested_svm_get_tdp_pdptr(struct kvm_vcpu *vcpu, int index)
2723{
2724 struct vcpu_svm *svm = to_svm(vcpu);
2725 u64 cr3 = svm->nested.nested_cr3;
2726 u64 pdpte;
2727 int ret;
2728
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05002729 ret = kvm_vcpu_read_guest_page(vcpu, gpa_to_gfn(__sme_clr(cr3)), &pdpte,
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002730 offset_in_page(cr3) + index * 8, 8);
Avi Kivitye4e517b2011-07-28 11:36:17 +03002731 if (ret)
2732 return 0;
2733 return pdpte;
2734}
2735
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002736static void nested_svm_set_tdp_cr3(struct kvm_vcpu *vcpu,
2737 unsigned long root)
2738{
2739 struct vcpu_svm *svm = to_svm(vcpu);
2740
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05002741 svm->vmcb->control.nested_cr3 = __sme_set(root);
Joerg Roedelb2747162010-12-03 11:45:53 +01002742 mark_dirty(svm->vmcb, VMCB_NPT);
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08002743 svm_flush_tlb(vcpu, true);
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002744}
2745
Avi Kivity6389ee92010-11-29 16:12:30 +02002746static void nested_svm_inject_npf_exit(struct kvm_vcpu *vcpu,
2747 struct x86_exception *fault)
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002748{
2749 struct vcpu_svm *svm = to_svm(vcpu);
2750
Paolo Bonzini5e352512014-09-02 13:18:37 +02002751 if (svm->vmcb->control.exit_code != SVM_EXIT_NPF) {
2752 /*
2753 * TODO: track the cause of the nested page fault, and
2754 * correctly fill in the high bits of exit_info_1.
2755 */
2756 svm->vmcb->control.exit_code = SVM_EXIT_NPF;
2757 svm->vmcb->control.exit_code_hi = 0;
2758 svm->vmcb->control.exit_info_1 = (1ULL << 32);
2759 svm->vmcb->control.exit_info_2 = fault->address;
2760 }
2761
2762 svm->vmcb->control.exit_info_1 &= ~0xffffffffULL;
2763 svm->vmcb->control.exit_info_1 |= fault->error_code;
2764
2765 /*
2766 * The present bit is always zero for page structure faults on real
2767 * hardware.
2768 */
2769 if (svm->vmcb->control.exit_info_1 & (2ULL << 32))
2770 svm->vmcb->control.exit_info_1 &= ~1;
Joerg Roedel5bd2edc2010-09-10 17:31:02 +02002771
2772 nested_svm_vmexit(svm);
2773}
2774
Paolo Bonzini8a3c1a332013-10-02 16:56:13 +02002775static void nested_svm_init_mmu_context(struct kvm_vcpu *vcpu)
Joerg Roedel4b161842010-09-10 17:31:03 +02002776{
Paolo Bonziniad896af2013-10-02 16:56:14 +02002777 WARN_ON(mmu_is_nested(vcpu));
2778 kvm_init_shadow_mmu(vcpu);
Joerg Roedel4b161842010-09-10 17:31:03 +02002779 vcpu->arch.mmu.set_cr3 = nested_svm_set_tdp_cr3;
2780 vcpu->arch.mmu.get_cr3 = nested_svm_get_tdp_cr3;
Avi Kivitye4e517b2011-07-28 11:36:17 +03002781 vcpu->arch.mmu.get_pdptr = nested_svm_get_tdp_pdptr;
Joerg Roedel4b161842010-09-10 17:31:03 +02002782 vcpu->arch.mmu.inject_page_fault = nested_svm_inject_npf_exit;
Yu Zhang855feb62017-08-24 20:27:55 +08002783 vcpu->arch.mmu.shadow_root_level = get_npt_level(vcpu);
Xiao Guangrongc258b622015-08-05 12:04:24 +08002784 reset_shadow_zero_bits_mask(vcpu, &vcpu->arch.mmu);
Joerg Roedel4b161842010-09-10 17:31:03 +02002785 vcpu->arch.walk_mmu = &vcpu->arch.nested_mmu;
Joerg Roedel4b161842010-09-10 17:31:03 +02002786}
2787
2788static void nested_svm_uninit_mmu_context(struct kvm_vcpu *vcpu)
2789{
2790 vcpu->arch.walk_mmu = &vcpu->arch.mmu;
2791}
2792
Alexander Grafc0725422008-11-25 20:17:03 +01002793static int nested_svm_check_permissions(struct vcpu_svm *svm)
2794{
Dan Carpentere9196ce2017-05-18 10:39:53 +03002795 if (!(svm->vcpu.arch.efer & EFER_SVME) ||
2796 !is_paging(&svm->vcpu)) {
Alexander Grafc0725422008-11-25 20:17:03 +01002797 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
2798 return 1;
2799 }
2800
2801 if (svm->vmcb->save.cpl) {
2802 kvm_inject_gp(&svm->vcpu, 0);
2803 return 1;
2804 }
2805
Dan Carpentere9196ce2017-05-18 10:39:53 +03002806 return 0;
Alexander Grafc0725422008-11-25 20:17:03 +01002807}
2808
Alexander Grafcf74a782008-11-25 20:17:08 +01002809static int nested_svm_check_exception(struct vcpu_svm *svm, unsigned nr,
2810 bool has_error_code, u32 error_code)
2811{
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002812 int vmexit;
2813
Joerg Roedel20307532010-11-29 17:51:48 +01002814 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel0295ad72009-08-07 11:49:37 +02002815 return 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01002816
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002817 vmexit = nested_svm_intercept(svm);
2818 if (vmexit != NESTED_EXIT_DONE)
2819 return 0;
2820
Joerg Roedel0295ad72009-08-07 11:49:37 +02002821 svm->vmcb->control.exit_code = SVM_EXIT_EXCP_BASE + nr;
2822 svm->vmcb->control.exit_code_hi = 0;
2823 svm->vmcb->control.exit_info_1 = error_code;
Paolo Bonzinib96fb432017-07-27 12:29:32 +02002824
2825 /*
2826 * FIXME: we should not write CR2 when L1 intercepts an L2 #PF exception.
2827 * The fix is to add the ancillary datum (CR2 or DR6) to structs
2828 * kvm_queued_exception and kvm_vcpu_events, so that CR2 and DR6 can be
 2829 * written only when inject_pending_event runs (DR6 would be written here
2830 * too). This should be conditional on a new capability---if the
2831 * capability is disabled, kvm_multiple_exception would write the
2832 * ancillary information to CR2 or DR6, for backwards ABI-compatibility.
2833 */
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002834 if (svm->vcpu.arch.exception.nested_apf)
2835 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.apf.nested_apf_token;
2836 else
2837 svm->vmcb->control.exit_info_2 = svm->vcpu.arch.cr2;
Joerg Roedel0295ad72009-08-07 11:49:37 +02002838
Wanpeng Liadfe20f2017-07-13 18:30:41 -07002839 svm->nested.exit_required = true;
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01002840 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01002841}
2842
Joerg Roedel8fe54652010-02-19 16:23:01 +01002843/* This function returns true if it is safe to enable the irq window */
2844static inline bool nested_svm_intr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002845{
Joerg Roedel20307532010-11-29 17:51:48 +01002846 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002847 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002848
Joerg Roedel26666952009-08-07 11:49:46 +02002849 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002850 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002851
Joerg Roedel26666952009-08-07 11:49:46 +02002852 if (!(svm->vcpu.arch.hflags & HF_HIF_MASK))
Joerg Roedel8fe54652010-02-19 16:23:01 +01002853 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01002854
Gleb Natapova0a07cd2010-09-20 10:15:32 +02002855 /*
2856 * if vmexit was already requested (by intercepted exception
2857 * for instance) do not overwrite it with "external interrupt"
2858 * vmexit.
2859 */
2860 if (svm->nested.exit_required)
2861 return false;
2862
Joerg Roedel197717d2010-02-24 18:59:19 +01002863 svm->vmcb->control.exit_code = SVM_EXIT_INTR;
2864 svm->vmcb->control.exit_info_1 = 0;
2865 svm->vmcb->control.exit_info_2 = 0;
Joerg Roedel26666952009-08-07 11:49:46 +02002866
Joerg Roedelcd3ff652009-10-09 16:08:26 +02002867 if (svm->nested.intercept & 1ULL) {
2868 /*
2869 * The #vmexit can't be emulated here directly because this
Guo Chaoc5ec2e52012-06-28 15:16:43 +08002870 * code path runs with irqs and preemption disabled. A
Joerg Roedelcd3ff652009-10-09 16:08:26 +02002871 * #vmexit emulation might sleep. Only signal request for
2872 * the #vmexit here.
2873 */
2874 svm->nested.exit_required = true;
Joerg Roedel236649d2009-10-09 16:08:30 +02002875 trace_kvm_nested_intr_vmexit(svm->vmcb->save.rip);
Joerg Roedel8fe54652010-02-19 16:23:01 +01002876 return false;
Alexander Grafcf74a782008-11-25 20:17:08 +01002877 }
2878
Joerg Roedel8fe54652010-02-19 16:23:01 +01002879 return true;
Alexander Grafcf74a782008-11-25 20:17:08 +01002880}
2881
Joerg Roedel887f5002010-02-24 18:59:12 +01002882/* This function returns true if it is safe to enable the nmi window */
2883static inline bool nested_svm_nmi(struct vcpu_svm *svm)
2884{
Joerg Roedel20307532010-11-29 17:51:48 +01002885 if (!is_guest_mode(&svm->vcpu))
Joerg Roedel887f5002010-02-24 18:59:12 +01002886 return true;
2887
2888 if (!(svm->nested.intercept & (1ULL << INTERCEPT_NMI)))
2889 return true;
2890
2891 svm->vmcb->control.exit_code = SVM_EXIT_NMI;
2892 svm->nested.exit_required = true;
2893
2894 return false;
2895}
2896
Joerg Roedel7597f122010-02-19 16:23:00 +01002897static void *nested_svm_map(struct vcpu_svm *svm, u64 gpa, struct page **_page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002898{
2899 struct page *page;
2900
Joerg Roedel6c3bd3d2010-02-19 16:23:04 +01002901 might_sleep();
2902
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002903 page = kvm_vcpu_gfn_to_page(&svm->vcpu, gpa >> PAGE_SHIFT);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002904 if (is_error_page(page))
2905 goto error;
2906
Joerg Roedel7597f122010-02-19 16:23:00 +01002907 *_page = page;
2908
2909 return kmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002910
2911error:
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002912 kvm_inject_gp(&svm->vcpu, 0);
2913
2914 return NULL;
2915}
2916
Joerg Roedel7597f122010-02-19 16:23:00 +01002917static void nested_svm_unmap(struct page *page)
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002918{
Joerg Roedel7597f122010-02-19 16:23:00 +01002919 kunmap(page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02002920 kvm_release_page_dirty(page);
2921}
2922
Joerg Roedelce2ac082010-03-01 15:34:39 +01002923static int nested_svm_intercept_ioio(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002924{
Jan Kiszka9bf41832014-06-30 10:54:17 +02002925 unsigned port, size, iopm_len;
2926 u16 val, mask;
2927 u8 start_bit;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002928 u64 gpa;
2929
2930 if (!(svm->nested.intercept & (1ULL << INTERCEPT_IOIO_PROT)))
2931 return NESTED_EXIT_HOST;
2932
2933 port = svm->vmcb->control.exit_info_1 >> 16;
Jan Kiszka9bf41832014-06-30 10:54:17 +02002934 size = (svm->vmcb->control.exit_info_1 & SVM_IOIO_SIZE_MASK) >>
2935 SVM_IOIO_SIZE_SHIFT;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002936 gpa = svm->nested.vmcb_iopm + (port / 8);
Jan Kiszka9bf41832014-06-30 10:54:17 +02002937 start_bit = port % 8;
2938 iopm_len = (start_bit + size > 8) ? 2 : 1;
2939 mask = (0xf >> (4 - size)) << start_bit;
2940 val = 0;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002941
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002942 if (kvm_vcpu_read_guest(&svm->vcpu, gpa, &val, iopm_len))
Jan Kiszka9bf41832014-06-30 10:54:17 +02002943 return NESTED_EXIT_DONE;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002944
Jan Kiszka9bf41832014-06-30 10:54:17 +02002945 return (val & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedelce2ac082010-03-01 15:34:39 +01002946}
2947
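/*
 * Editor's sketch (illustrative, not part of the driver): the IOPM
 * lookup arithmetic used in nested_svm_intercept_ioio() above. The I/O
 * permission map holds one bit per port, so an access of 'size' bytes
 * starting at 'port' covers 'size' consecutive bits, which may straddle
 * a byte boundary -- hence up to two bytes are read from guest memory.
 * Worked example: port 0x3f8, size 2 -> byte offset 0x7f, start_bit 0,
 * mask 0x0003, iopm_len 1. The helper name is invented for
 * illustration.
 */
static inline u16 iopm_mask(unsigned port, unsigned size)
{
	u8 start_bit = port % 8;	/* bit position inside the byte */

	return (0xf >> (4 - size)) << start_bit;
}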
Joerg Roedeld2477822010-03-01 15:34:34 +01002948static int nested_svm_exit_handled_msr(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01002949{
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002950 u32 offset, msr, value;
2951 int write, mask;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002952
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002953 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
Joerg Roedeld2477822010-03-01 15:34:34 +01002954 return NESTED_EXIT_HOST;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002955
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002956 msr = svm->vcpu.arch.regs[VCPU_REGS_RCX];
2957 offset = svm_msrpm_offset(msr);
2958 write = svm->vmcb->control.exit_info_1 & 1;
2959 mask = 1 << ((2 * (msr & 0xf)) + write);
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002960
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002961 if (offset == MSR_INVALID)
2962 return NESTED_EXIT_DONE;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002963
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002964 /* Offset is in 32 bit units but we need it in 8 bit units */
2965 offset *= 4;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002966
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02002967 if (kvm_vcpu_read_guest(&svm->vcpu, svm->nested.vmcb_msrpm + offset, &value, 4))
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002968 return NESTED_EXIT_DONE;
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02002969
Joerg Roedel0d6b3532010-03-01 15:34:38 +01002970 return (value & mask) ? NESTED_EXIT_DONE : NESTED_EXIT_HOST;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002971}
2972
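/*
 * Editor's note (not driver code): the MSRPM lookup arithmetic used in
 * nested_svm_exit_handled_msr() above. Each MSR occupies two adjacent
 * bits in the permission map (read intercept, then write intercept), so
 * sixteen MSRs share one 32-bit word. With 'offset' being the word
 * index returned by svm_msrpm_offset(), the intercept bit for an access
 * is:
 *
 *   bit = 2 * (msr & 0xf) + write;   /. write is 0 or 1 ./
 *   intercepted = value & (1u << bit);
 *
 * e.g. a write to an MSR with (msr & 0xf) == 5 tests bit 11 of the word.
 */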
Ladi Prosekab2f4d732017-06-21 09:06:58 +02002973/* DB exceptions for our internal use must not cause vmexit */
2974static int nested_svm_intercept_db(struct vcpu_svm *svm)
2975{
2976 unsigned long dr6;
2977
2978 /* if we're not singlestepping, it's not ours */
2979 if (!svm->nmi_singlestep)
2980 return NESTED_EXIT_DONE;
2981
2982 /* if it's not a singlestep exception, it's not ours */
2983 if (kvm_get_dr(&svm->vcpu, 6, &dr6))
2984 return NESTED_EXIT_DONE;
2985 if (!(dr6 & DR6_BS))
2986 return NESTED_EXIT_DONE;
2987
2988 /* if the guest is singlestepping, it should get the vmexit */
2989 if (svm->nmi_singlestep_guest_rflags & X86_EFLAGS_TF) {
2990 disable_nmi_singlestep(svm);
2991 return NESTED_EXIT_DONE;
2992 }
2993
2994 /* it's ours, the nested hypervisor must not see this one */
2995 return NESTED_EXIT_HOST;
2996}
2997
Joerg Roedel410e4d52009-08-07 11:49:44 +02002998static int nested_svm_exit_special(struct vcpu_svm *svm)
Joerg Roedel4c2161a2009-08-07 11:49:35 +02002999{
Alexander Grafcf74a782008-11-25 20:17:08 +01003000 u32 exit_code = svm->vmcb->control.exit_code;
Joerg Roedel4c2161a2009-08-07 11:49:35 +02003001
Joerg Roedel410e4d52009-08-07 11:49:44 +02003002 switch (exit_code) {
3003 case SVM_EXIT_INTR:
3004 case SVM_EXIT_NMI:
Joerg Roedelff47a492010-04-22 12:33:14 +02003005 case SVM_EXIT_EXCP_BASE + MC_VECTOR:
Joerg Roedel410e4d52009-08-07 11:49:44 +02003006 return NESTED_EXIT_HOST;
Joerg Roedel410e4d52009-08-07 11:49:44 +02003007 case SVM_EXIT_NPF:
Joerg Roedele0231712010-02-24 18:59:10 +01003008 /* For now we are always handling NPFs when using them */
Joerg Roedel410e4d52009-08-07 11:49:44 +02003009 if (npt_enabled)
3010 return NESTED_EXIT_HOST;
3011 break;
Joerg Roedel410e4d52009-08-07 11:49:44 +02003012 case SVM_EXIT_EXCP_BASE + PF_VECTOR:
Gleb Natapov631bc482010-10-14 11:22:52 +02003013 /* When we're shadowing, trap PFs, but not async PF */
Wanpeng Li1261bfa2017-07-13 18:30:40 -07003014 if (!npt_enabled && svm->vcpu.arch.apf.host_apf_reason == 0)
Joerg Roedel410e4d52009-08-07 11:49:44 +02003015 return NESTED_EXIT_HOST;
3016 break;
3017 default:
3018 break;
Alexander Grafcf74a782008-11-25 20:17:08 +01003019 }
3020
Joerg Roedel410e4d52009-08-07 11:49:44 +02003021 return NESTED_EXIT_CONTINUE;
3022}
3023
3024/*
 3025 * If this function returns NESTED_EXIT_DONE, this #vmexit was already handled
3026 */
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01003027static int nested_svm_intercept(struct vcpu_svm *svm)
Joerg Roedel410e4d52009-08-07 11:49:44 +02003028{
3029 u32 exit_code = svm->vmcb->control.exit_code;
3030 int vmexit = NESTED_EXIT_HOST;
3031
Alexander Grafcf74a782008-11-25 20:17:08 +01003032 switch (exit_code) {
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02003033 case SVM_EXIT_MSR:
Joerg Roedel3d62d9a2009-08-07 11:49:39 +02003034 vmexit = nested_svm_exit_handled_msr(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02003035 break;
Joerg Roedelce2ac082010-03-01 15:34:39 +01003036 case SVM_EXIT_IOIO:
3037 vmexit = nested_svm_intercept_ioio(svm);
3038 break;
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003039 case SVM_EXIT_READ_CR0 ... SVM_EXIT_WRITE_CR8: {
3040 u32 bit = 1U << (exit_code - SVM_EXIT_READ_CR0);
3041 if (svm->nested.intercept_cr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02003042 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01003043 break;
3044 }
Joerg Roedel3aed0412010-11-30 18:03:58 +01003045 case SVM_EXIT_READ_DR0 ... SVM_EXIT_WRITE_DR7: {
3046 u32 bit = 1U << (exit_code - SVM_EXIT_READ_DR0);
3047 if (svm->nested.intercept_dr & bit)
Joerg Roedel410e4d52009-08-07 11:49:44 +02003048 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01003049 break;
3050 }
3051 case SVM_EXIT_EXCP_BASE ... SVM_EXIT_EXCP_BASE + 0x1f: {
3052 u32 excp_bits = 1 << (exit_code - SVM_EXIT_EXCP_BASE);
Ladi Prosekab2f4d732017-06-21 09:06:58 +02003053 if (svm->nested.intercept_exceptions & excp_bits) {
3054 if (exit_code == SVM_EXIT_EXCP_BASE + DB_VECTOR)
3055 vmexit = nested_svm_intercept_db(svm);
3056 else
3057 vmexit = NESTED_EXIT_DONE;
3058 }
Gleb Natapov631bc482010-10-14 11:22:52 +02003059 /* an async page fault always causes a vmexit */
3060 else if ((exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR) &&
Wanpeng Liadfe20f2017-07-13 18:30:41 -07003061 svm->vcpu.arch.exception.nested_apf != 0)
Gleb Natapov631bc482010-10-14 11:22:52 +02003062 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01003063 break;
3064 }
Joerg Roedel228070b2010-04-22 12:33:10 +02003065 case SVM_EXIT_ERR: {
3066 vmexit = NESTED_EXIT_DONE;
3067 break;
3068 }
Alexander Grafcf74a782008-11-25 20:17:08 +01003069 default: {
3070 u64 exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
Joerg Roedelaad42c62009-08-07 11:49:34 +02003071 if (svm->nested.intercept & exit_bits)
Joerg Roedel410e4d52009-08-07 11:49:44 +02003072 vmexit = NESTED_EXIT_DONE;
Alexander Grafcf74a782008-11-25 20:17:08 +01003073 }
3074 }
3075
Joerg Roedelb8e88bc2010-02-19 16:23:02 +01003076 return vmexit;
3077}
3078
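/*
 * Editor's note (not driver code): for exit codes without a dedicated
 * case above, the default branch maps the exit code onto the cached
 * 64-bit intercept word with
 *
 *   exit_bits = 1ULL << (exit_code - SVM_EXIT_INTR);
 *
 * i.e. SVM_EXIT_INTR is bit 0, SVM_EXIT_NMI bit 1, and so on, mirroring
 * the bit layout of vmcb->control.intercept.
 */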
3079static int nested_svm_exit_handled(struct vcpu_svm *svm)
3080{
3081 int vmexit;
3082
3083 vmexit = nested_svm_intercept(svm);
3084
3085 if (vmexit == NESTED_EXIT_DONE)
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02003086 nested_svm_vmexit(svm);
Joerg Roedel9c4e40b92009-08-07 11:49:36 +02003087
3088 return vmexit;
Alexander Grafcf74a782008-11-25 20:17:08 +01003089}
3090
Joerg Roedel0460a972009-08-07 11:49:31 +02003091static inline void copy_vmcb_control_area(struct vmcb *dst_vmcb, struct vmcb *from_vmcb)
3092{
3093 struct vmcb_control_area *dst = &dst_vmcb->control;
3094 struct vmcb_control_area *from = &from_vmcb->control;
3095
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003096 dst->intercept_cr = from->intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01003097 dst->intercept_dr = from->intercept_dr;
Joerg Roedel0460a972009-08-07 11:49:31 +02003098 dst->intercept_exceptions = from->intercept_exceptions;
3099 dst->intercept = from->intercept;
3100 dst->iopm_base_pa = from->iopm_base_pa;
3101 dst->msrpm_base_pa = from->msrpm_base_pa;
3102 dst->tsc_offset = from->tsc_offset;
3103 dst->asid = from->asid;
3104 dst->tlb_ctl = from->tlb_ctl;
3105 dst->int_ctl = from->int_ctl;
3106 dst->int_vector = from->int_vector;
3107 dst->int_state = from->int_state;
3108 dst->exit_code = from->exit_code;
3109 dst->exit_code_hi = from->exit_code_hi;
3110 dst->exit_info_1 = from->exit_info_1;
3111 dst->exit_info_2 = from->exit_info_2;
3112 dst->exit_int_info = from->exit_int_info;
3113 dst->exit_int_info_err = from->exit_int_info_err;
3114 dst->nested_ctl = from->nested_ctl;
3115 dst->event_inj = from->event_inj;
3116 dst->event_inj_err = from->event_inj_err;
3117 dst->nested_cr3 = from->nested_cr3;
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05003118 dst->virt_ext = from->virt_ext;
Joerg Roedel0460a972009-08-07 11:49:31 +02003119}
3120
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003121static int nested_svm_vmexit(struct vcpu_svm *svm)
Alexander Grafcf74a782008-11-25 20:17:08 +01003122{
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003123 struct vmcb *nested_vmcb;
Joerg Roedele6aa9ab2009-08-07 11:49:33 +02003124 struct vmcb *hsave = svm->nested.hsave;
Joerg Roedel33740e42009-08-07 11:49:29 +02003125 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003126 struct page *page;
Alexander Grafcf74a782008-11-25 20:17:08 +01003127
Joerg Roedel17897f32009-10-09 16:08:29 +02003128 trace_kvm_nested_vmexit_inject(vmcb->control.exit_code,
3129 vmcb->control.exit_info_1,
3130 vmcb->control.exit_info_2,
3131 vmcb->control.exit_int_info,
Stefan Hajnoczie097e5f2011-07-22 12:46:52 +01003132 vmcb->control.exit_int_info_err,
3133 KVM_ISA_SVM);
Joerg Roedel17897f32009-10-09 16:08:29 +02003134
Joerg Roedel7597f122010-02-19 16:23:00 +01003135 nested_vmcb = nested_svm_map(svm, svm->nested.vmcb, &page);
Joerg Roedel34f80cf2009-08-07 11:49:38 +02003136 if (!nested_vmcb)
3137 return 1;
3138
Joerg Roedel20307532010-11-29 17:51:48 +01003139 /* Exit Guest-Mode */
3140 leave_guest_mode(&svm->vcpu);
Joerg Roedel06fc77722010-02-19 16:23:07 +01003141 svm->nested.vmcb = 0;
3142
Alexander Grafcf74a782008-11-25 20:17:08 +01003143 /* Give the current vmcb to the guest */
Joerg Roedel33740e42009-08-07 11:49:29 +02003144 disable_gif(svm);
3145
3146 nested_vmcb->save.es = vmcb->save.es;
3147 nested_vmcb->save.cs = vmcb->save.cs;
3148 nested_vmcb->save.ss = vmcb->save.ss;
3149 nested_vmcb->save.ds = vmcb->save.ds;
3150 nested_vmcb->save.gdtr = vmcb->save.gdtr;
3151 nested_vmcb->save.idtr = vmcb->save.idtr;
Joerg Roedel3f6a9d12010-07-27 18:14:20 +02003152 nested_vmcb->save.efer = svm->vcpu.arch.efer;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01003153 nested_vmcb->save.cr0 = kvm_read_cr0(&svm->vcpu);
Avi Kivity9f8fe502010-12-05 17:30:00 +02003154 nested_vmcb->save.cr3 = kvm_read_cr3(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02003155 nested_vmcb->save.cr2 = vmcb->save.cr2;
Joerg Roedelcdbbdc12010-02-19 16:23:03 +01003156 nested_vmcb->save.cr4 = svm->vcpu.arch.cr4;
Avi Kivityf6e78472010-08-02 15:30:20 +03003157 nested_vmcb->save.rflags = kvm_get_rflags(&svm->vcpu);
Joerg Roedel33740e42009-08-07 11:49:29 +02003158 nested_vmcb->save.rip = vmcb->save.rip;
3159 nested_vmcb->save.rsp = vmcb->save.rsp;
3160 nested_vmcb->save.rax = vmcb->save.rax;
3161 nested_vmcb->save.dr7 = vmcb->save.dr7;
3162 nested_vmcb->save.dr6 = vmcb->save.dr6;
3163 nested_vmcb->save.cpl = vmcb->save.cpl;
3164
3165 nested_vmcb->control.int_ctl = vmcb->control.int_ctl;
3166 nested_vmcb->control.int_vector = vmcb->control.int_vector;
3167 nested_vmcb->control.int_state = vmcb->control.int_state;
3168 nested_vmcb->control.exit_code = vmcb->control.exit_code;
3169 nested_vmcb->control.exit_code_hi = vmcb->control.exit_code_hi;
3170 nested_vmcb->control.exit_info_1 = vmcb->control.exit_info_1;
3171 nested_vmcb->control.exit_info_2 = vmcb->control.exit_info_2;
3172 nested_vmcb->control.exit_int_info = vmcb->control.exit_int_info;
3173 nested_vmcb->control.exit_int_info_err = vmcb->control.exit_int_info_err;
Joerg Roedel6092d3d2015-10-14 15:10:54 +02003174
3175 if (svm->nrips_enabled)
3176 nested_vmcb->control.next_rip = vmcb->control.next_rip;
Alexander Graf8d23c462009-10-09 16:08:25 +02003177
3178 /*
3179 * If we emulate a VMRUN/#VMEXIT in the same host #vmexit cycle we have
3180 * to make sure that we do not lose injected events. So check event_inj
3181 * here and copy it to exit_int_info if it is valid.
 3182 * Exit_int_info and event_inj can't both be valid because the case
3183 * below only happens on a VMRUN instruction intercept which has
3184 * no valid exit_int_info set.
3185 */
3186 if (vmcb->control.event_inj & SVM_EVTINJ_VALID) {
3187 struct vmcb_control_area *nc = &nested_vmcb->control;
3188
3189 nc->exit_int_info = vmcb->control.event_inj;
3190 nc->exit_int_info_err = vmcb->control.event_inj_err;
3191 }
3192
Joerg Roedel33740e42009-08-07 11:49:29 +02003193 nested_vmcb->control.tlb_ctl = 0;
3194 nested_vmcb->control.event_inj = 0;
3195 nested_vmcb->control.event_inj_err = 0;
Alexander Grafcf74a782008-11-25 20:17:08 +01003196
3197 /* We always set V_INTR_MASKING and remember the old value in hflags */
3198 if (!(svm->vcpu.arch.hflags & HF_VINTR_MASK))
3199 nested_vmcb->control.int_ctl &= ~V_INTR_MASKING_MASK;
3200
Alexander Grafcf74a782008-11-25 20:17:08 +01003201 /* Restore the original control entries */
Joerg Roedel0460a972009-08-07 11:49:31 +02003202 copy_vmcb_control_area(vmcb, hsave);
Alexander Grafcf74a782008-11-25 20:17:08 +01003203
Alexander Graf219b65d2009-06-15 15:21:25 +02003204 kvm_clear_exception_queue(&svm->vcpu);
3205 kvm_clear_interrupt_queue(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01003206
Joerg Roedel4b161842010-09-10 17:31:03 +02003207 svm->nested.nested_cr3 = 0;
3208
Alexander Grafcf74a782008-11-25 20:17:08 +01003209 /* Restore selected save entries */
3210 svm->vmcb->save.es = hsave->save.es;
3211 svm->vmcb->save.cs = hsave->save.cs;
3212 svm->vmcb->save.ss = hsave->save.ss;
3213 svm->vmcb->save.ds = hsave->save.ds;
3214 svm->vmcb->save.gdtr = hsave->save.gdtr;
3215 svm->vmcb->save.idtr = hsave->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03003216 kvm_set_rflags(&svm->vcpu, hsave->save.rflags);
Alexander Grafcf74a782008-11-25 20:17:08 +01003217 svm_set_efer(&svm->vcpu, hsave->save.efer);
3218 svm_set_cr0(&svm->vcpu, hsave->save.cr0 | X86_CR0_PE);
3219 svm_set_cr4(&svm->vcpu, hsave->save.cr4);
3220 if (npt_enabled) {
3221 svm->vmcb->save.cr3 = hsave->save.cr3;
3222 svm->vcpu.arch.cr3 = hsave->save.cr3;
3223 } else {
Avi Kivity23902182010-06-10 17:02:16 +03003224 (void)kvm_set_cr3(&svm->vcpu, hsave->save.cr3);
Alexander Grafcf74a782008-11-25 20:17:08 +01003225 }
3226 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, hsave->save.rax);
3227 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, hsave->save.rsp);
3228 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, hsave->save.rip);
3229 svm->vmcb->save.dr7 = 0;
3230 svm->vmcb->save.cpl = 0;
3231 svm->vmcb->control.exit_int_info = 0;
3232
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003233 mark_all_dirty(svm->vmcb);
3234
Joerg Roedel7597f122010-02-19 16:23:00 +01003235 nested_svm_unmap(page);
Alexander Grafcf74a782008-11-25 20:17:08 +01003236
Joerg Roedel4b161842010-09-10 17:31:03 +02003237 nested_svm_uninit_mmu_context(&svm->vcpu);
Alexander Grafcf74a782008-11-25 20:17:08 +01003238 kvm_mmu_reset_context(&svm->vcpu);
3239 kvm_mmu_load(&svm->vcpu);
3240
3241 return 0;
3242}
Alexander Graf3d6368e2008-11-25 20:17:07 +01003243
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003244static bool nested_svm_vmrun_msrpm(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003245{
Joerg Roedel323c3d82010-03-01 15:34:37 +01003246 /*
3247 * This function merges the msr permission bitmaps of kvm and the
Guo Chaoc5ec2e52012-06-28 15:16:43 +08003248 * nested vmcb. It is optimized in that it only merges the parts where
Joerg Roedel323c3d82010-03-01 15:34:37 +01003249 * the kvm msr permission bitmap may contain zero bits
3250 */
Alexander Graf3d6368e2008-11-25 20:17:07 +01003251 int i;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003252
Joerg Roedel323c3d82010-03-01 15:34:37 +01003253 if (!(svm->nested.intercept & (1ULL << INTERCEPT_MSR_PROT)))
3254 return true;
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003255
Joerg Roedel323c3d82010-03-01 15:34:37 +01003256 for (i = 0; i < MSRPM_OFFSETS; i++) {
3257 u32 value, p;
3258 u64 offset;
3259
3260 if (msrpm_offsets[i] == 0xffffffff)
3261 break;
3262
Joerg Roedel0d6b3532010-03-01 15:34:38 +01003263 p = msrpm_offsets[i];
3264 offset = svm->nested.vmcb_msrpm + (p * 4);
Joerg Roedel323c3d82010-03-01 15:34:37 +01003265
Paolo Bonzini54bf36a2015-04-08 15:39:23 +02003266 if (kvm_vcpu_read_guest(&svm->vcpu, offset, &value, 4))
Joerg Roedel323c3d82010-03-01 15:34:37 +01003267 return false;
3268
3269 svm->nested.msrpm[p] = svm->msrpm[p] | value;
3270 }
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003271
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05003272 svm->vmcb->control.msrpm_base_pa = __sme_set(__pa(svm->nested.msrpm));
Alexander Graf3d6368e2008-11-25 20:17:07 +01003273
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003274 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003275}
3276
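/*
 * Editor's note (not driver code): the bitwise OR above is what makes
 * the merge correct -- a set bit in either map means "intercept", so
 * the merged map traps every MSR access that either KVM or the nested
 * hypervisor wants to see. Only the MSRPM_OFFSETS words that KVM itself
 * may leave clear need merging; everything else already intercepts.
 */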
Joerg Roedel52c65a302010-08-02 16:46:44 +02003277static bool nested_vmcb_checks(struct vmcb *vmcb)
3278{
3279 if ((vmcb->control.intercept & (1ULL << INTERCEPT_VMRUN)) == 0)
3280 return false;
3281
Joerg Roedeldbe77582010-08-02 16:46:45 +02003282 if (vmcb->control.asid == 0)
3283 return false;
3284
Tom Lendackycea3a192017-12-04 10:57:24 -06003285 if ((vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) &&
3286 !npt_enabled)
Joerg Roedel4b161842010-09-10 17:31:03 +02003287 return false;
3288
Joerg Roedel52c65a302010-08-02 16:46:44 +02003289 return true;
3290}
3291
Ladi Prosekc2634062017-10-11 16:54:44 +02003292static void enter_svm_guest_mode(struct vcpu_svm *svm, u64 vmcb_gpa,
3293 struct vmcb *nested_vmcb, struct page *page)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003294{
Avi Kivityf6e78472010-08-02 15:30:20 +03003295 if (kvm_get_rflags(&svm->vcpu) & X86_EFLAGS_IF)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003296 svm->vcpu.arch.hflags |= HF_HIF_MASK;
3297 else
3298 svm->vcpu.arch.hflags &= ~HF_HIF_MASK;
3299
Tom Lendackycea3a192017-12-04 10:57:24 -06003300 if (nested_vmcb->control.nested_ctl & SVM_NESTED_CTL_NP_ENABLE) {
Joerg Roedel4b161842010-09-10 17:31:03 +02003301 kvm_mmu_unload(&svm->vcpu);
3302 svm->nested.nested_cr3 = nested_vmcb->control.nested_cr3;
3303 nested_svm_init_mmu_context(&svm->vcpu);
3304 }
3305
Alexander Graf3d6368e2008-11-25 20:17:07 +01003306 /* Load the nested guest state */
3307 svm->vmcb->save.es = nested_vmcb->save.es;
3308 svm->vmcb->save.cs = nested_vmcb->save.cs;
3309 svm->vmcb->save.ss = nested_vmcb->save.ss;
3310 svm->vmcb->save.ds = nested_vmcb->save.ds;
3311 svm->vmcb->save.gdtr = nested_vmcb->save.gdtr;
3312 svm->vmcb->save.idtr = nested_vmcb->save.idtr;
Avi Kivityf6e78472010-08-02 15:30:20 +03003313 kvm_set_rflags(&svm->vcpu, nested_vmcb->save.rflags);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003314 svm_set_efer(&svm->vcpu, nested_vmcb->save.efer);
3315 svm_set_cr0(&svm->vcpu, nested_vmcb->save.cr0);
3316 svm_set_cr4(&svm->vcpu, nested_vmcb->save.cr4);
3317 if (npt_enabled) {
3318 svm->vmcb->save.cr3 = nested_vmcb->save.cr3;
3319 svm->vcpu.arch.cr3 = nested_vmcb->save.cr3;
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01003320 } else
Avi Kivity23902182010-06-10 17:02:16 +03003321 (void)kvm_set_cr3(&svm->vcpu, nested_vmcb->save.cr3);
Joerg Roedel0e5cbe32010-02-24 18:59:11 +01003322
3323 /* Guest paging mode is active - reset mmu */
3324 kvm_mmu_reset_context(&svm->vcpu);
3325
Joerg Roedeldefbba52009-08-07 11:49:30 +02003326 svm->vmcb->save.cr2 = svm->vcpu.arch.cr2 = nested_vmcb->save.cr2;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003327 kvm_register_write(&svm->vcpu, VCPU_REGS_RAX, nested_vmcb->save.rax);
3328 kvm_register_write(&svm->vcpu, VCPU_REGS_RSP, nested_vmcb->save.rsp);
3329 kvm_register_write(&svm->vcpu, VCPU_REGS_RIP, nested_vmcb->save.rip);
Joerg Roedele0231712010-02-24 18:59:10 +01003330
Alexander Graf3d6368e2008-11-25 20:17:07 +01003331 /* In case we don't even reach vcpu_run, the fields are not updated */
3332 svm->vmcb->save.rax = nested_vmcb->save.rax;
3333 svm->vmcb->save.rsp = nested_vmcb->save.rsp;
3334 svm->vmcb->save.rip = nested_vmcb->save.rip;
3335 svm->vmcb->save.dr7 = nested_vmcb->save.dr7;
3336 svm->vmcb->save.dr6 = nested_vmcb->save.dr6;
3337 svm->vmcb->save.cpl = nested_vmcb->save.cpl;
3338
Joerg Roedelf7138532010-03-01 15:34:40 +01003339 svm->nested.vmcb_msrpm = nested_vmcb->control.msrpm_base_pa & ~0x0fffULL;
Joerg Roedelce2ac082010-03-01 15:34:39 +01003340 svm->nested.vmcb_iopm = nested_vmcb->control.iopm_base_pa & ~0x0fffULL;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003341
Joerg Roedelaad42c62009-08-07 11:49:34 +02003342 /* cache intercepts */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003343 svm->nested.intercept_cr = nested_vmcb->control.intercept_cr;
Joerg Roedel3aed0412010-11-30 18:03:58 +01003344 svm->nested.intercept_dr = nested_vmcb->control.intercept_dr;
Joerg Roedelaad42c62009-08-07 11:49:34 +02003345 svm->nested.intercept_exceptions = nested_vmcb->control.intercept_exceptions;
3346 svm->nested.intercept = nested_vmcb->control.intercept;
3347
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08003348 svm_flush_tlb(&svm->vcpu, true);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003349 svm->vmcb->control.int_ctl = nested_vmcb->control.int_ctl | V_INTR_MASKING_MASK;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003350 if (nested_vmcb->control.int_ctl & V_INTR_MASKING_MASK)
3351 svm->vcpu.arch.hflags |= HF_VINTR_MASK;
3352 else
3353 svm->vcpu.arch.hflags &= ~HF_VINTR_MASK;
3354
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003355 if (svm->vcpu.arch.hflags & HF_VINTR_MASK) {
3356 /* We only want the cr8 intercept bits of the guest */
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01003357 clr_cr_intercept(svm, INTERCEPT_CR8_READ);
3358 clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);
Joerg Roedel88ab24a2010-02-19 16:23:06 +01003359 }
3360
Joerg Roedel0d945bd2010-05-05 16:04:45 +02003361 /* We don't want to see VMMCALLs from a nested guest */
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003362 clr_intercept(svm, INTERCEPT_VMMCALL);
Joerg Roedel0d945bd2010-05-05 16:04:45 +02003363
Janakarajan Natarajan0dc92112017-07-06 15:50:45 -05003364 svm->vmcb->control.virt_ext = nested_vmcb->control.virt_ext;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003365 svm->vmcb->control.int_vector = nested_vmcb->control.int_vector;
3366 svm->vmcb->control.int_state = nested_vmcb->control.int_state;
3367 svm->vmcb->control.tsc_offset += nested_vmcb->control.tsc_offset;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003368 svm->vmcb->control.event_inj = nested_vmcb->control.event_inj;
3369 svm->vmcb->control.event_inj_err = nested_vmcb->control.event_inj_err;
3370
Joerg Roedel7597f122010-02-19 16:23:00 +01003371 nested_svm_unmap(page);
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003372
Joerg Roedel20307532010-11-29 17:51:48 +01003373 /* Enter Guest-Mode */
3374 enter_guest_mode(&svm->vcpu);
3375
Joerg Roedel384c6362010-11-30 18:03:56 +01003376 /*
3377 * Merge guest and host intercepts - must be called with vcpu in
 3378 * guest-mode to take effect here
3379 */
3380 recalc_intercepts(svm);
3381
Joerg Roedel06fc77722010-02-19 16:23:07 +01003382 svm->nested.vmcb = vmcb_gpa;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003383
Joerg Roedel2af91942009-08-07 11:49:28 +02003384 enable_gif(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003385
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003386 mark_all_dirty(svm->vmcb);
Ladi Prosekc2634062017-10-11 16:54:44 +02003387}
3388
3389static bool nested_svm_vmrun(struct vcpu_svm *svm)
3390{
3391 struct vmcb *nested_vmcb;
3392 struct vmcb *hsave = svm->nested.hsave;
3393 struct vmcb *vmcb = svm->vmcb;
3394 struct page *page;
3395 u64 vmcb_gpa;
3396
3397 vmcb_gpa = svm->vmcb->save.rax;
3398
3399 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
3400 if (!nested_vmcb)
3401 return false;
3402
3403 if (!nested_vmcb_checks(nested_vmcb)) {
3404 nested_vmcb->control.exit_code = SVM_EXIT_ERR;
3405 nested_vmcb->control.exit_code_hi = 0;
3406 nested_vmcb->control.exit_info_1 = 0;
3407 nested_vmcb->control.exit_info_2 = 0;
3408
3409 nested_svm_unmap(page);
3410
3411 return false;
3412 }
3413
3414 trace_kvm_nested_vmrun(svm->vmcb->save.rip, vmcb_gpa,
3415 nested_vmcb->save.rip,
3416 nested_vmcb->control.int_ctl,
3417 nested_vmcb->control.event_inj,
3418 nested_vmcb->control.nested_ctl);
3419
3420 trace_kvm_nested_intercepts(nested_vmcb->control.intercept_cr & 0xffff,
3421 nested_vmcb->control.intercept_cr >> 16,
3422 nested_vmcb->control.intercept_exceptions,
3423 nested_vmcb->control.intercept);
3424
3425 /* Clear internal status */
3426 kvm_clear_exception_queue(&svm->vcpu);
3427 kvm_clear_interrupt_queue(&svm->vcpu);
3428
3429 /*
3430 * Save the old vmcb, so we don't need to pick what we save, but can
3431 * restore everything when a VMEXIT occurs
3432 */
3433 hsave->save.es = vmcb->save.es;
3434 hsave->save.cs = vmcb->save.cs;
3435 hsave->save.ss = vmcb->save.ss;
3436 hsave->save.ds = vmcb->save.ds;
3437 hsave->save.gdtr = vmcb->save.gdtr;
3438 hsave->save.idtr = vmcb->save.idtr;
3439 hsave->save.efer = svm->vcpu.arch.efer;
3440 hsave->save.cr0 = kvm_read_cr0(&svm->vcpu);
3441 hsave->save.cr4 = svm->vcpu.arch.cr4;
3442 hsave->save.rflags = kvm_get_rflags(&svm->vcpu);
3443 hsave->save.rip = kvm_rip_read(&svm->vcpu);
3444 hsave->save.rsp = vmcb->save.rsp;
3445 hsave->save.rax = vmcb->save.rax;
3446 if (npt_enabled)
3447 hsave->save.cr3 = vmcb->save.cr3;
3448 else
3449 hsave->save.cr3 = kvm_read_cr3(&svm->vcpu);
3450
3451 copy_vmcb_control_area(hsave, vmcb);
3452
3453 enter_svm_guest_mode(svm, vmcb_gpa, nested_vmcb, page);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01003454
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003455 return true;
Alexander Graf3d6368e2008-11-25 20:17:07 +01003456}
3457
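/*
 * Editor's note (not driver code): nested_svm_vmrun() above performs
 * the emulated world switch in four steps -- map the guest's VMCB,
 * sanity-check it via nested_vmcb_checks() (reporting SVM_EXIT_ERR on
 * failure), snapshot the host state into the hsave area, and finally
 * hand off to enter_svm_guest_mode() to load the nested guest state.
 */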
Joerg Roedel9966bf62009-08-07 11:49:40 +02003458static void nested_svm_vmloadsave(struct vmcb *from_vmcb, struct vmcb *to_vmcb)
Alexander Graf55426752008-11-25 20:17:06 +01003459{
3460 to_vmcb->save.fs = from_vmcb->save.fs;
3461 to_vmcb->save.gs = from_vmcb->save.gs;
3462 to_vmcb->save.tr = from_vmcb->save.tr;
3463 to_vmcb->save.ldtr = from_vmcb->save.ldtr;
3464 to_vmcb->save.kernel_gs_base = from_vmcb->save.kernel_gs_base;
3465 to_vmcb->save.star = from_vmcb->save.star;
3466 to_vmcb->save.lstar = from_vmcb->save.lstar;
3467 to_vmcb->save.cstar = from_vmcb->save.cstar;
3468 to_vmcb->save.sfmask = from_vmcb->save.sfmask;
3469 to_vmcb->save.sysenter_cs = from_vmcb->save.sysenter_cs;
3470 to_vmcb->save.sysenter_esp = from_vmcb->save.sysenter_esp;
3471 to_vmcb->save.sysenter_eip = from_vmcb->save.sysenter_eip;
Alexander Graf55426752008-11-25 20:17:06 +01003472}
3473
Avi Kivity851ba692009-08-24 11:10:17 +03003474static int vmload_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01003475{
Joerg Roedel9966bf62009-08-07 11:49:40 +02003476 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003477 struct page *page;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003478 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02003479
Alexander Graf55426752008-11-25 20:17:06 +01003480 if (nested_svm_check_permissions(svm))
3481 return 1;
3482
Joerg Roedel7597f122010-02-19 16:23:00 +01003483 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02003484 if (!nested_vmcb)
3485 return 1;
3486
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003487 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003488 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003489
Joerg Roedel9966bf62009-08-07 11:49:40 +02003490 nested_svm_vmloadsave(nested_vmcb, svm->vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01003491 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01003492
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003493 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01003494}
3495
Avi Kivity851ba692009-08-24 11:10:17 +03003496static int vmsave_interception(struct vcpu_svm *svm)
Alexander Graf55426752008-11-25 20:17:06 +01003497{
Joerg Roedel9966bf62009-08-07 11:49:40 +02003498 struct vmcb *nested_vmcb;
Joerg Roedel7597f122010-02-19 16:23:00 +01003499 struct page *page;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003500 int ret;
Joerg Roedel9966bf62009-08-07 11:49:40 +02003501
Alexander Graf55426752008-11-25 20:17:06 +01003502 if (nested_svm_check_permissions(svm))
3503 return 1;
3504
Joerg Roedel7597f122010-02-19 16:23:00 +01003505 nested_vmcb = nested_svm_map(svm, svm->vmcb->save.rax, &page);
Joerg Roedel9966bf62009-08-07 11:49:40 +02003506 if (!nested_vmcb)
3507 return 1;
3508
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003509 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003510 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedele3e9ed32011-04-06 12:30:03 +02003511
Joerg Roedel9966bf62009-08-07 11:49:40 +02003512 nested_svm_vmloadsave(svm->vmcb, nested_vmcb);
Joerg Roedel7597f122010-02-19 16:23:00 +01003513 nested_svm_unmap(page);
Alexander Graf55426752008-11-25 20:17:06 +01003514
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003515 return ret;
Alexander Graf55426752008-11-25 20:17:06 +01003516}
3517
Avi Kivity851ba692009-08-24 11:10:17 +03003518static int vmrun_interception(struct vcpu_svm *svm)
Alexander Graf3d6368e2008-11-25 20:17:07 +01003519{
Alexander Graf3d6368e2008-11-25 20:17:07 +01003520 if (nested_svm_check_permissions(svm))
3521 return 1;
3522
Roedel, Joergb75f4eb2010-09-03 14:21:40 +02003523 /* Save rip after vmrun instruction */
3524 kvm_rip_write(&svm->vcpu, kvm_rip_read(&svm->vcpu) + 3);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003525
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003526 if (!nested_svm_vmrun(svm))
Alexander Graf3d6368e2008-11-25 20:17:07 +01003527 return 1;
3528
Joerg Roedel9738b2c2009-08-07 11:49:41 +02003529 if (!nested_svm_vmrun_msrpm(svm))
Joerg Roedel1f8da472009-08-07 11:49:43 +02003530 goto failed;
3531
3532 return 1;
3533
3534failed:
3535
3536 svm->vmcb->control.exit_code = SVM_EXIT_ERR;
3537 svm->vmcb->control.exit_code_hi = 0;
3538 svm->vmcb->control.exit_info_1 = 0;
3539 svm->vmcb->control.exit_info_2 = 0;
3540
3541 nested_svm_vmexit(svm);
Alexander Graf3d6368e2008-11-25 20:17:07 +01003542
3543 return 1;
3544}
3545
Avi Kivity851ba692009-08-24 11:10:17 +03003546static int stgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01003547{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003548 int ret;
3549
Alexander Graf1371d902008-11-25 20:17:04 +01003550 if (nested_svm_check_permissions(svm))
3551 return 1;
3552
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003553 /*
3554 * If VGIF is enabled, the STGI intercept is only added to
Ladi Prosekcc3d9672017-10-17 16:02:39 +02003555 * detect the opening of the SMI/NMI window; remove it now.
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05003556 */
3557 if (vgif_enabled(svm))
3558 clr_intercept(svm, INTERCEPT_STGI);
3559
Alexander Graf1371d902008-11-25 20:17:04 +01003560 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003561 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Avi Kivity3842d132010-07-27 12:30:24 +03003562 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01003563
Joerg Roedel2af91942009-08-07 11:49:28 +02003564 enable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01003565
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003566 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01003567}
3568
Avi Kivity851ba692009-08-24 11:10:17 +03003569static int clgi_interception(struct vcpu_svm *svm)
Alexander Graf1371d902008-11-25 20:17:04 +01003570{
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003571 int ret;
3572
Alexander Graf1371d902008-11-25 20:17:04 +01003573 if (nested_svm_check_permissions(svm))
3574 return 1;
3575
3576 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003577 ret = kvm_skip_emulated_instruction(&svm->vcpu);
Alexander Graf1371d902008-11-25 20:17:04 +01003578
Joerg Roedel2af91942009-08-07 11:49:28 +02003579 disable_gif(svm);
Alexander Graf1371d902008-11-25 20:17:04 +01003580
3581 /* After a CLGI no interrupts should come */
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05003582 if (!kvm_vcpu_apicv_active(&svm->vcpu)) {
3583 svm_clear_vintr(svm);
3584 svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
3585 mark_dirty(svm->vmcb, VMCB_INTR);
3586 }
Joerg Roedeldecdbf62010-12-03 11:45:52 +01003587
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003588 return ret;
Alexander Graf1371d902008-11-25 20:17:04 +01003589}
3590
Avi Kivity851ba692009-08-24 11:10:17 +03003591static int invlpga_interception(struct vcpu_svm *svm)
Alexander Grafff092382009-06-15 15:21:24 +02003592{
3593 struct kvm_vcpu *vcpu = &svm->vcpu;
Alexander Grafff092382009-06-15 15:21:24 +02003594
David Kaplan668f1982015-02-20 16:02:10 -06003595 trace_kvm_invlpga(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RCX),
3596 kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Joerg Roedelec1ff792009-10-09 16:08:31 +02003597
Alexander Grafff092382009-06-15 15:21:24 +02003598 /* Let's treat INVLPGA the same as INVLPG (can be optimized!) */
David Kaplan668f1982015-02-20 16:02:10 -06003599 kvm_mmu_invlpg(vcpu, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Alexander Grafff092382009-06-15 15:21:24 +02003600
3601 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003602 return kvm_skip_emulated_instruction(&svm->vcpu);
Alexander Grafff092382009-06-15 15:21:24 +02003603}
3604
Joerg Roedel532a46b2009-10-09 16:08:32 +02003605static int skinit_interception(struct vcpu_svm *svm)
3606{
David Kaplan668f1982015-02-20 16:02:10 -06003607 trace_kvm_skinit(svm->vmcb->save.rip, kvm_register_read(&svm->vcpu, VCPU_REGS_RAX));
Joerg Roedel532a46b2009-10-09 16:08:32 +02003608
3609 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3610 return 1;
3611}
3612
David Kaplandab429a2015-03-02 13:43:37 -06003613static int wbinvd_interception(struct vcpu_svm *svm)
3614{
Kyle Huey6affcbe2016-11-29 12:40:40 -08003615 return kvm_emulate_wbinvd(&svm->vcpu);
David Kaplandab429a2015-03-02 13:43:37 -06003616}
3617
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003618static int xsetbv_interception(struct vcpu_svm *svm)
3619{
3620 u64 new_bv = kvm_read_edx_eax(&svm->vcpu);
3621 u32 index = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
3622
3623 if (kvm_set_xcr(&svm->vcpu, index, new_bv) == 0) {
3624 svm->next_rip = kvm_rip_read(&svm->vcpu) + 3;
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003625 return kvm_skip_emulated_instruction(&svm->vcpu);
Joerg Roedel81dd35d2010-12-07 17:15:06 +01003626 }
3627
3628 return 1;
3629}
3630
Avi Kivity851ba692009-08-24 11:10:17 +03003631static int task_switch_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003632{
Izik Eidus37817f22008-03-24 23:14:53 +02003633 u16 tss_selector;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003634 int reason;
3635 int int_type = svm->vmcb->control.exit_int_info &
3636 SVM_EXITINTINFO_TYPE_MASK;
Gleb Natapov8317c292009-04-12 13:37:02 +03003637 int int_vec = svm->vmcb->control.exit_int_info & SVM_EVTINJ_VEC_MASK;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003638 uint32_t type =
3639 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_TYPE_MASK;
3640 uint32_t idt_v =
3641 svm->vmcb->control.exit_int_info & SVM_EXITINTINFO_VALID;
Jan Kiszkae269fb22010-04-14 15:51:09 +02003642 bool has_error_code = false;
3643 u32 error_code = 0;
Izik Eidus37817f22008-03-24 23:14:53 +02003644
3645 tss_selector = (u16)svm->vmcb->control.exit_info_1;
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003646
Izik Eidus37817f22008-03-24 23:14:53 +02003647 if (svm->vmcb->control.exit_info_2 &
3648 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_IRET))
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003649 reason = TASK_SWITCH_IRET;
3650 else if (svm->vmcb->control.exit_info_2 &
3651 (1ULL << SVM_EXITINFOSHIFT_TS_REASON_JMP))
3652 reason = TASK_SWITCH_JMP;
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003653 else if (idt_v)
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003654 reason = TASK_SWITCH_GATE;
3655 else
3656 reason = TASK_SWITCH_CALL;
3657
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003658 if (reason == TASK_SWITCH_GATE) {
3659 switch (type) {
3660 case SVM_EXITINTINFO_TYPE_NMI:
3661 svm->vcpu.arch.nmi_injected = false;
3662 break;
3663 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszkae269fb22010-04-14 15:51:09 +02003664 if (svm->vmcb->control.exit_info_2 &
3665 (1ULL << SVM_EXITINFOSHIFT_TS_HAS_ERROR_CODE)) {
3666 has_error_code = true;
3667 error_code =
3668 (u32)svm->vmcb->control.exit_info_2;
3669 }
Gleb Natapovfe8e7f82009-04-23 17:03:48 +03003670 kvm_clear_exception_queue(&svm->vcpu);
3671 break;
3672 case SVM_EXITINTINFO_TYPE_INTR:
3673 kvm_clear_interrupt_queue(&svm->vcpu);
3674 break;
3675 default:
3676 break;
3677 }
3678 }
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003679
Gleb Natapov8317c292009-04-12 13:37:02 +03003680 if (reason != TASK_SWITCH_GATE ||
3681 int_type == SVM_EXITINTINFO_TYPE_SOFT ||
3682 (int_type == SVM_EXITINTINFO_TYPE_EXEPT &&
Gleb Natapovf629cf82009-05-11 13:35:49 +03003683 (int_vec == OF_VECTOR || int_vec == BP_VECTOR)))
3684 skip_emulated_instruction(&svm->vcpu);
Gleb Natapov64a7ec02009-03-30 16:03:29 +03003685
Kevin Wolf7f3d35f2012-02-08 14:34:38 +01003686 if (int_type != SVM_EXITINTINFO_TYPE_SOFT)
3687 int_vec = -1;
3688
3689 if (kvm_task_switch(&svm->vcpu, tss_selector, int_vec, reason,
Gleb Natapovacb54512010-04-15 21:03:50 +03003690 has_error_code, error_code) == EMULATE_FAIL) {
3691 svm->vcpu.run->exit_reason = KVM_EXIT_INTERNAL_ERROR;
3692 svm->vcpu.run->internal.suberror = KVM_INTERNAL_ERROR_EMULATION;
3693 svm->vcpu.run->internal.ndata = 0;
3694 return 0;
3695 }
3696 return 1;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003697}
3698
Avi Kivity851ba692009-08-24 11:10:17 +03003699static int cpuid_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003700{
Marcelo Tosatti5fdbf972008-06-27 14:58:02 -03003701 svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
Kyle Huey6a908b62016-11-29 12:40:37 -08003702 return kvm_emulate_cpuid(&svm->vcpu);
Avi Kivity6aa8b732006-12-10 02:21:36 -08003703}
3704
Avi Kivity851ba692009-08-24 11:10:17 +03003705static int iret_interception(struct vcpu_svm *svm)
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003706{
3707 ++svm->vcpu.stat.nmi_window_exits;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01003708 clr_intercept(svm, INTERCEPT_IRET);
Gleb Natapov44c11432009-05-11 13:35:52 +03003709 svm->vcpu.arch.hflags |= HF_IRET_MASK;
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02003710 svm->nmi_iret_rip = kvm_rip_read(&svm->vcpu);
Radim Krčmářf303b4c2014-01-17 20:52:42 +01003711 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
Gleb Natapov95ba8273132009-04-21 17:45:08 +03003712 return 1;
3713}
3714
Avi Kivity851ba692009-08-24 11:10:17 +03003715static int invlpg_interception(struct vcpu_svm *svm)
Marcelo Tosattia7052892008-09-23 13:18:35 -03003716{
Andre Przywaradf4f31082010-12-21 11:12:06 +01003717 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3718 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
3719
3720 kvm_mmu_invlpg(&svm->vcpu, svm->vmcb->control.exit_info_1);
Ladi Prosekb742c1e2017-06-22 09:05:26 +02003721 return kvm_skip_emulated_instruction(&svm->vcpu);
Marcelo Tosattia7052892008-09-23 13:18:35 -03003722}
3723
Avi Kivity851ba692009-08-24 11:10:17 +03003724static int emulate_on_interception(struct vcpu_svm *svm)
Avi Kivity6aa8b732006-12-10 02:21:36 -08003725{
Andre Przywara51d8b662010-12-21 11:12:02 +01003726 return emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE;
Avi Kivity6aa8b732006-12-10 02:21:36 -08003727}
3728
Brijesh Singh7607b712018-02-19 10:14:44 -06003729static int rsm_interception(struct vcpu_svm *svm)
3730{
3731 return x86_emulate_instruction(&svm->vcpu, 0, 0,
3732 rsm_ins_bytes, 2) == EMULATE_DONE;
3733}
3734
Avi Kivity332b56e2011-11-10 14:57:24 +02003735static int rdpmc_interception(struct vcpu_svm *svm)
3736{
3737 int err;
3738
3739 if (!static_cpu_has(X86_FEATURE_NRIPS))
3740 return emulate_on_interception(svm);
3741
3742 err = kvm_rdpmc(&svm->vcpu);
Kyle Huey6affcbe2016-11-29 12:40:40 -08003743 return kvm_complete_insn_gp(&svm->vcpu, err);
Avi Kivity332b56e2011-11-10 14:57:24 +02003744}
3745
Xiubo Li52eb5a62015-03-13 17:39:45 +08003746static bool check_selective_cr0_intercepted(struct vcpu_svm *svm,
3747 unsigned long val)
Joerg Roedel628afd22011-04-04 12:39:36 +02003748{
3749 unsigned long cr0 = svm->vcpu.arch.cr0;
3750 bool ret = false;
3751 u64 intercept;
3752
3753 intercept = svm->nested.intercept;
3754
3755 if (!is_guest_mode(&svm->vcpu) ||
3756 (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0))))
3757 return false;
3758
3759 cr0 &= ~SVM_CR0_SELECTIVE_MASK;
3760 val &= ~SVM_CR0_SELECTIVE_MASK;
3761
3762 if (cr0 ^ val) {
3763 svm->vmcb->control.exit_code = SVM_EXIT_CR0_SEL_WRITE;
3764 ret = (nested_svm_exit_handled(svm) == NESTED_EXIT_DONE);
3765 }
3766
3767 return ret;
3768}
3769
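/*
 * Editor's note (not driver code, and SVM_CR0_SELECTIVE_MASK is defined
 * elsewhere in this file): the mask covers CR0.TS and CR0.MP, so the
 * comparison above only signals the nested hypervisor's CR0_SEL_WRITE
 * intercept when a write changes some bit other than TS or MP; toggling
 * TS alone (e.g. for lazy FPU switching) is deliberately left to the
 * host.
 */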
Andre Przywara7ff76d52010-12-21 11:12:04 +01003770#define CR_VALID (1ULL << 63)
3771
3772static int cr_interception(struct vcpu_svm *svm)
3773{
3774 int reg, cr;
3775 unsigned long val;
3776 int err;
3777
3778 if (!static_cpu_has(X86_FEATURE_DECODEASSISTS))
3779 return emulate_on_interception(svm);
3780
3781 if (unlikely((svm->vmcb->control.exit_info_1 & CR_VALID) == 0))
3782 return emulate_on_interception(svm);
3783
3784 reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
David Kaplan5e575182015-03-06 14:44:35 -06003785 if (svm->vmcb->control.exit_code == SVM_EXIT_CR0_SEL_WRITE)
3786 cr = SVM_EXIT_WRITE_CR0 - SVM_EXIT_READ_CR0;
3787 else
3788 cr = svm->vmcb->control.exit_code - SVM_EXIT_READ_CR0;
Andre Przywara7ff76d52010-12-21 11:12:04 +01003789
3790 err = 0;
3791 if (cr >= 16) { /* mov to cr */
3792 cr -= 16;
3793 val = kvm_register_read(&svm->vcpu, reg);
3794 switch (cr) {
3795 case 0:
Joerg Roedel628afd22011-04-04 12:39:36 +02003796 if (!check_selective_cr0_intercepted(svm, val))
3797 err = kvm_set_cr0(&svm->vcpu, val);
Joerg Roedel977b2d02011-04-18 11:42:52 +02003798 else
3799 return 1;
3800
Andre Przywara7ff76d52010-12-21 11:12:04 +01003801 break;
3802 case 3:
3803 err = kvm_set_cr3(&svm->vcpu, val);
3804 break;
3805 case 4:
3806 err = kvm_set_cr4(&svm->vcpu, val);
3807 break;
3808 case 8:
3809 err = kvm_set_cr8(&svm->vcpu, val);
3810 break;
3811 default:
3812 WARN(1, "unhandled write to CR%d", cr);
3813 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3814 return 1;
3815 }
3816 } else { /* mov from cr */
3817 switch (cr) {
3818 case 0:
3819 val = kvm_read_cr0(&svm->vcpu);
3820 break;
3821 case 2:
3822 val = svm->vcpu.arch.cr2;
3823 break;
3824 case 3:
Avi Kivity9f8fe502010-12-05 17:30:00 +02003825 val = kvm_read_cr3(&svm->vcpu);
Andre Przywara7ff76d52010-12-21 11:12:04 +01003826 break;
3827 case 4:
3828 val = kvm_read_cr4(&svm->vcpu);
3829 break;
3830 case 8:
3831 val = kvm_get_cr8(&svm->vcpu);
3832 break;
3833 default:
3834 WARN(1, "unhandled read from CR%d", cr);
3835 kvm_queue_exception(&svm->vcpu, UD_VECTOR);
3836 return 1;
3837 }
3838 kvm_register_write(&svm->vcpu, reg, val);
3839 }
Kyle Huey6affcbe2016-11-29 12:40:40 -08003840 return kvm_complete_insn_gp(&svm->vcpu, err);
Andre Przywara7ff76d52010-12-21 11:12:04 +01003841}
3842
static int dr_interception(struct vcpu_svm *svm)
{
        int reg, dr;
        unsigned long val;

        if (svm->vcpu.guest_debug == 0) {
                /*
                 * No more DR vmexits; force a reload of the debug registers
                 * and reenter on this instruction.  The next vmexit will
                 * retrieve the full state of the debug registers.
                 */
                clr_dr_intercepts(svm);
                svm->vcpu.arch.switch_db_regs |= KVM_DEBUGREG_WONT_EXIT;
                return 1;
        }

        if (!boot_cpu_has(X86_FEATURE_DECODEASSISTS))
                return emulate_on_interception(svm);

        reg = svm->vmcb->control.exit_info_1 & SVM_EXITINFO_REG_MASK;
        dr = svm->vmcb->control.exit_code - SVM_EXIT_READ_DR0;

        if (dr >= 16) { /* mov to DRn */
                if (!kvm_require_dr(&svm->vcpu, dr - 16))
                        return 1;
                val = kvm_register_read(&svm->vcpu, reg);
                kvm_set_dr(&svm->vcpu, dr - 16, val);
        } else {
                if (!kvm_require_dr(&svm->vcpu, dr))
                        return 1;
                kvm_get_dr(&svm->vcpu, dr, &val);
                kvm_register_write(&svm->vcpu, reg, val);
        }

        return kvm_skip_emulated_instruction(&svm->vcpu);
}

static int cr8_write_interception(struct vcpu_svm *svm)
{
        struct kvm_run *kvm_run = svm->vcpu.run;
        int r;

        u8 cr8_prev = kvm_get_cr8(&svm->vcpu);
        /* instruction emulation calls kvm_set_cr8() */
        r = cr_interception(svm);
        if (lapic_in_kernel(&svm->vcpu))
                return r;
        if (cr8_prev <= kvm_get_cr8(&svm->vcpu))
                return r;
        kvm_run->exit_reason = KVM_EXIT_SET_TPR;
        return 0;
}

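/*
 * "Feature" MSRs are queried without a vCPU (KVM_GET_MSR_FEATURE).
 * Only MSR_F10H_DECFG is reported here, advertising a serializing
 * LFENCE when the host CPU provides one.
 */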
static int svm_get_msr_feature(struct kvm_msr_entry *msr)
{
        msr->data = 0;

        switch (msr->index) {
        case MSR_F10H_DECFG:
                if (boot_cpu_has(X86_FEATURE_LFENCE_RDTSC))
                        msr->data |= MSR_F10H_DECFG_LFENCE_SERIALIZE;
                break;
        default:
                return 1;
        }

        return 0;
}

static int svm_get_msr(struct kvm_vcpu *vcpu, struct msr_data *msr_info)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        switch (msr_info->index) {
        case MSR_IA32_TSC: {
                msr_info->data = svm->vmcb->control.tsc_offset +
                        kvm_scale_tsc(vcpu, rdtsc());

                break;
        }
        case MSR_STAR:
                msr_info->data = svm->vmcb->save.star;
                break;
#ifdef CONFIG_X86_64
        case MSR_LSTAR:
                msr_info->data = svm->vmcb->save.lstar;
                break;
        case MSR_CSTAR:
                msr_info->data = svm->vmcb->save.cstar;
                break;
        case MSR_KERNEL_GS_BASE:
                msr_info->data = svm->vmcb->save.kernel_gs_base;
                break;
        case MSR_SYSCALL_MASK:
                msr_info->data = svm->vmcb->save.sfmask;
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                msr_info->data = svm->vmcb->save.sysenter_cs;
                break;
        case MSR_IA32_SYSENTER_EIP:
                msr_info->data = svm->sysenter_eip;
                break;
        case MSR_IA32_SYSENTER_ESP:
                msr_info->data = svm->sysenter_esp;
                break;
        case MSR_TSC_AUX:
                if (!boot_cpu_has(X86_FEATURE_RDTSCP))
                        return 1;
                msr_info->data = svm->tsc_aux;
                break;
        /*
         * Nobody will change the following 5 values in the VMCB so we can
         * safely return them on rdmsr. They will always be 0 until LBRV is
         * implemented.
         */
        case MSR_IA32_DEBUGCTLMSR:
                msr_info->data = svm->vmcb->save.dbgctl;
                break;
        case MSR_IA32_LASTBRANCHFROMIP:
                msr_info->data = svm->vmcb->save.br_from;
                break;
        case MSR_IA32_LASTBRANCHTOIP:
                msr_info->data = svm->vmcb->save.br_to;
                break;
        case MSR_IA32_LASTINTFROMIP:
                msr_info->data = svm->vmcb->save.last_excp_from;
                break;
        case MSR_IA32_LASTINTTOIP:
                msr_info->data = svm->vmcb->save.last_excp_to;
                break;
        case MSR_VM_HSAVE_PA:
                msr_info->data = svm->nested.hsave_msr;
                break;
        case MSR_VM_CR:
                msr_info->data = svm->nested.vm_cr_msr;
                break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr_info->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
                        return 1;

                msr_info->data = svm->spec_ctrl;
                break;
        case MSR_F15H_IC_CFG: {

                int family, model;

                family = guest_cpuid_family(vcpu);
                model = guest_cpuid_model(vcpu);

                if (family < 0 || model < 0)
                        return kvm_get_msr_common(vcpu, msr_info);

                msr_info->data = 0;

                if (family == 0x15 &&
                    (model >= 0x2 && model < 0x20))
                        msr_info->data = 0x1E;
        }
        break;
        case MSR_F10H_DECFG:
                msr_info->data = svm->msr_decfg;
                break;
        default:
                return kvm_get_msr_common(vcpu, msr_info);
        }
        return 0;
}

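/*
 * RDMSR exit: the 64-bit result is returned to the guest split across
 * RAX (low half) and RDX (high half) before the instruction is skipped.
 */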
static int rdmsr_interception(struct vcpu_svm *svm)
{
        u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
        struct msr_data msr_info;

        msr_info.index = ecx;
        msr_info.host_initiated = false;
        if (svm_get_msr(&svm->vcpu, &msr_info)) {
                trace_kvm_msr_read_ex(ecx);
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        } else {
                trace_kvm_msr_read(ecx, msr_info.data);

                kvm_register_write(&svm->vcpu, VCPU_REGS_RAX,
                                   msr_info.data & 0xffffffff);
                kvm_register_write(&svm->vcpu, VCPU_REGS_RDX,
                                   msr_info.data >> 32);
                svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
                return kvm_skip_emulated_instruction(&svm->vcpu);
        }
}

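/*
 * VM_CR is only partially writable: once SVM_DIS is set, the SVM_LOCK
 * and SVM_DIS bits can no longer be changed, and a write that would
 * disable SVM while EFER.SVME is still set is rejected.
 */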
static int svm_set_vm_cr(struct kvm_vcpu *vcpu, u64 data)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        int svm_dis, chg_mask;

        if (data & ~SVM_VM_CR_VALID_MASK)
                return 1;

        chg_mask = SVM_VM_CR_VALID_MASK;

        if (svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK)
                chg_mask &= ~(SVM_VM_CR_SVM_LOCK_MASK | SVM_VM_CR_SVM_DIS_MASK);

        svm->nested.vm_cr_msr &= ~chg_mask;
        svm->nested.vm_cr_msr |= (data & chg_mask);

        svm_dis = svm->nested.vm_cr_msr & SVM_VM_CR_SVM_DIS_MASK;

        /* check for svm_disable while efer.svme is set */
        if (svm_dis && (vcpu->arch.efer & EFER_SVME))
                return 1;

        return 0;
}

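/*
 * Guest WRMSR handling: MSRs shadowed in the VMCB save area are written
 * there directly, the speculation-control MSRs are passed through on
 * first use, and everything else falls through to kvm_set_msr_common().
 */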
static int svm_set_msr(struct kvm_vcpu *vcpu, struct msr_data *msr)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        u32 ecx = msr->index;
        u64 data = msr->data;
        switch (ecx) {
        case MSR_IA32_CR_PAT:
                if (!kvm_mtrr_valid(vcpu, MSR_IA32_CR_PAT, data))
                        return 1;
                vcpu->arch.pat = data;
                svm->vmcb->save.g_pat = data;
                mark_dirty(svm->vmcb, VMCB_NPT);
                break;
        case MSR_IA32_TSC:
                kvm_write_tsc(vcpu, msr);
                break;
        case MSR_IA32_SPEC_CTRL:
                if (!msr->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_IBRS))
                        return 1;

                /* The STIBP bit doesn't fault even if it's not advertised */
                if (data & ~(SPEC_CTRL_IBRS | SPEC_CTRL_STIBP))
                        return 1;

                svm->spec_ctrl = data;

                if (!data)
                        break;

                /*
                 * For non-nested:
                 * When it's written (to non-zero) for the first time, pass
                 * it through.
                 *
                 * For nested:
                 * The handling of the MSR bitmap for L2 guests is done in
                 * nested_svm_vmrun_msrpm.
                 * We update the L1 MSR bit as well since it will end up
                 * touching the MSR anyway now.
                 */
                set_msr_interception(svm->msrpm, MSR_IA32_SPEC_CTRL, 1, 1);
                break;
        case MSR_IA32_PRED_CMD:
                if (!msr->host_initiated &&
                    !guest_cpuid_has(vcpu, X86_FEATURE_IBPB))
                        return 1;

                if (data & ~PRED_CMD_IBPB)
                        return 1;

                if (!data)
                        break;

                wrmsrl(MSR_IA32_PRED_CMD, PRED_CMD_IBPB);
                if (is_guest_mode(vcpu))
                        break;
                set_msr_interception(svm->msrpm, MSR_IA32_PRED_CMD, 0, 1);
                break;
        case MSR_STAR:
                svm->vmcb->save.star = data;
                break;
#ifdef CONFIG_X86_64
        case MSR_LSTAR:
                svm->vmcb->save.lstar = data;
                break;
        case MSR_CSTAR:
                svm->vmcb->save.cstar = data;
                break;
        case MSR_KERNEL_GS_BASE:
                svm->vmcb->save.kernel_gs_base = data;
                break;
        case MSR_SYSCALL_MASK:
                svm->vmcb->save.sfmask = data;
                break;
#endif
        case MSR_IA32_SYSENTER_CS:
                svm->vmcb->save.sysenter_cs = data;
                break;
        case MSR_IA32_SYSENTER_EIP:
                svm->sysenter_eip = data;
                svm->vmcb->save.sysenter_eip = data;
                break;
        case MSR_IA32_SYSENTER_ESP:
                svm->sysenter_esp = data;
                svm->vmcb->save.sysenter_esp = data;
                break;
        case MSR_TSC_AUX:
                if (!boot_cpu_has(X86_FEATURE_RDTSCP))
                        return 1;

                /*
                 * This is rare, so we update the MSR here instead of using
                 * direct_access_msrs. Doing that would require a rdmsr in
                 * svm_vcpu_put.
                 */
                svm->tsc_aux = data;
                wrmsrl(MSR_TSC_AUX, svm->tsc_aux);
                break;
        case MSR_IA32_DEBUGCTLMSR:
                if (!boot_cpu_has(X86_FEATURE_LBRV)) {
                        vcpu_unimpl(vcpu, "%s: MSR_IA32_DEBUGCTL 0x%llx, nop\n",
                                    __func__, data);
                        break;
                }
                if (data & DEBUGCTL_RESERVED_BITS)
                        return 1;

                svm->vmcb->save.dbgctl = data;
                mark_dirty(svm->vmcb, VMCB_LBR);
                if (data & (1ULL<<0))
                        svm_enable_lbrv(svm);
                else
                        svm_disable_lbrv(svm);
                break;
        case MSR_VM_HSAVE_PA:
                svm->nested.hsave_msr = data;
                break;
        case MSR_VM_CR:
                return svm_set_vm_cr(vcpu, data);
        case MSR_VM_IGNNE:
                vcpu_unimpl(vcpu, "unimplemented wrmsr: 0x%x data 0x%llx\n", ecx, data);
                break;
        case MSR_F10H_DECFG: {
                struct kvm_msr_entry msr_entry;

                msr_entry.index = msr->index;
                if (svm_get_msr_feature(&msr_entry))
                        return 1;

                /* Check the supported bits */
                if (data & ~msr_entry.data)
                        return 1;

                /* Don't allow the guest to change a bit, #GP */
                if (!msr->host_initiated && (data ^ msr_entry.data))
                        return 1;

                svm->msr_decfg = data;
                break;
        }
        case MSR_IA32_APICBASE:
                if (kvm_vcpu_apicv_active(vcpu))
                        avic_update_vapic_bar(to_svm(vcpu), data);
                /* Fall through */
        default:
                return kvm_set_msr_common(vcpu, msr);
        }
        return 0;
}

static int wrmsr_interception(struct vcpu_svm *svm)
{
        struct msr_data msr;
        u32 ecx = kvm_register_read(&svm->vcpu, VCPU_REGS_RCX);
        u64 data = kvm_read_edx_eax(&svm->vcpu);

        msr.data = data;
        msr.index = ecx;
        msr.host_initiated = false;

        svm->next_rip = kvm_rip_read(&svm->vcpu) + 2;
        if (kvm_set_msr(&svm->vcpu, &msr)) {
                trace_kvm_msr_write_ex(ecx, data);
                kvm_inject_gp(&svm->vcpu, 0);
                return 1;
        } else {
                trace_kvm_msr_write(ecx, data);
                return kvm_skip_emulated_instruction(&svm->vcpu);
        }
}

static int msr_interception(struct vcpu_svm *svm)
{
        if (svm->vmcb->control.exit_info_1)
                return wrmsr_interception(svm);
        else
                return rdmsr_interception(svm);
}

static int interrupt_window_interception(struct vcpu_svm *svm)
{
        kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
        svm_clear_vintr(svm);
        svm->vmcb->control.int_ctl &= ~V_IRQ_MASK;
        mark_dirty(svm->vmcb, VMCB_INTR);
        ++svm->vcpu.stat.irq_window_exits;
        return 1;
}

static int pause_interception(struct vcpu_svm *svm)
{
        struct kvm_vcpu *vcpu = &svm->vcpu;
        bool in_kernel = (svm_get_cpl(vcpu) == 0);

        kvm_vcpu_on_spin(vcpu, in_kernel);
        return 1;
}

static int nop_interception(struct vcpu_svm *svm)
{
        return kvm_skip_emulated_instruction(&(svm->vcpu));
}

static int monitor_interception(struct vcpu_svm *svm)
{
        printk_once(KERN_WARNING "kvm: MONITOR instruction emulated as NOP!\n");
        return nop_interception(svm);
}

static int mwait_interception(struct vcpu_svm *svm)
{
        printk_once(KERN_WARNING "kvm: MWAIT instruction emulated as NOP!\n");
        return nop_interception(svm);
}

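/*
 * AVIC accelerates only fixed, edge-triggered IPIs in hardware;
 * anything else exits with one of the cause codes below (reported in
 * the high half of EXITINFO2) and is emulated here.
 */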
enum avic_ipi_failure_cause {
        AVIC_IPI_FAILURE_INVALID_INT_TYPE,
        AVIC_IPI_FAILURE_TARGET_NOT_RUNNING,
        AVIC_IPI_FAILURE_INVALID_TARGET,
        AVIC_IPI_FAILURE_INVALID_BACKING_PAGE,
};

static int avic_incomplete_ipi_interception(struct vcpu_svm *svm)
{
        u32 icrh = svm->vmcb->control.exit_info_1 >> 32;
        u32 icrl = svm->vmcb->control.exit_info_1;
        u32 id = svm->vmcb->control.exit_info_2 >> 32;
        u32 index = svm->vmcb->control.exit_info_2 & 0xFF;
        struct kvm_lapic *apic = svm->vcpu.arch.apic;

        trace_kvm_avic_incomplete_ipi(svm->vcpu.vcpu_id, icrh, icrl, id, index);

        switch (id) {
        case AVIC_IPI_FAILURE_INVALID_INT_TYPE:
                /*
                 * AVIC hardware handles the generation of
                 * IPIs when the specified Message Type is Fixed
                 * (also known as fixed delivery mode) and
                 * the Trigger Mode is edge-triggered. The hardware
                 * also supports self and broadcast delivery modes
                 * specified via the Destination Shorthand (DSH)
                 * field of the ICRL. Logical and physical APIC ID
                 * formats are supported. All other IPI types cause
                 * a #VMEXIT, which needs to be emulated.
                 */
                kvm_lapic_reg_write(apic, APIC_ICR2, icrh);
                kvm_lapic_reg_write(apic, APIC_ICR, icrl);
                break;
        case AVIC_IPI_FAILURE_TARGET_NOT_RUNNING: {
                int i;
                struct kvm_vcpu *vcpu;
                struct kvm *kvm = svm->vcpu.kvm;
                struct kvm_lapic *apic = svm->vcpu.arch.apic;

                /*
                 * At this point, we expect that the AVIC HW has already
                 * set the appropriate IRR bits on the valid target
                 * vcpus. So, we just need to kick the appropriate vcpu.
                 */
                kvm_for_each_vcpu(i, vcpu, kvm) {
                        bool m = kvm_apic_match_dest(vcpu, apic,
                                                     icrl & KVM_APIC_SHORT_MASK,
                                                     GET_APIC_DEST_FIELD(icrh),
                                                     icrl & KVM_APIC_DEST_MASK);

                        if (m && !avic_vcpu_is_running(vcpu))
                                kvm_vcpu_wake_up(vcpu);
                }
                break;
        }
        case AVIC_IPI_FAILURE_INVALID_TARGET:
                break;
        case AVIC_IPI_FAILURE_INVALID_BACKING_PAGE:
                WARN_ONCE(1, "Invalid backing page\n");
                break;
        default:
                pr_err("Unknown IPI interception\n");
        }

        return 1;
}

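/*
 * Logical APIC ID lookup, indexed by the destination logical ID. As a
 * worked example of the index math below (not from the original
 * source): in flat mode, LDR bit 3 (dlid 0x08) gives index 3; in
 * cluster mode, cluster 2 with APIC bit 1 (dlid 0x22) gives
 * (2 << 2) + 1 = 9.
 */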
static u32 *avic_get_logical_id_entry(struct kvm_vcpu *vcpu, u32 ldr, bool flat)
{
        struct kvm_arch *vm_data = &vcpu->kvm->arch;
        int index;
        u32 *logical_apic_id_table;
        int dlid = GET_APIC_LOGICAL_ID(ldr);

        if (!dlid)
                return NULL;

        if (flat) { /* flat */
                index = ffs(dlid) - 1;
                if (index > 7)
                        return NULL;
        } else { /* cluster */
                int cluster = (dlid & 0xf0) >> 4;
                int apic = ffs(dlid & 0x0f) - 1;

                if ((apic < 0) || (apic > 7) ||
                    (cluster >= 0xf))
                        return NULL;
                index = (cluster << 2) + apic;
        }

        logical_apic_id_table = (u32 *) page_address(vm_data->avic_logical_id_table_page);

        return &logical_apic_id_table[index];
}

static int avic_ldr_write(struct kvm_vcpu *vcpu, u8 g_physical_id, u32 ldr,
                          bool valid)
{
        bool flat;
        u32 *entry, new_entry;

        flat = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR) == APIC_DFR_FLAT;
        entry = avic_get_logical_id_entry(vcpu, ldr, flat);
        if (!entry)
                return -EINVAL;

        new_entry = READ_ONCE(*entry);
        new_entry &= ~AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK;
        new_entry |= (g_physical_id & AVIC_LOGICAL_ID_ENTRY_GUEST_PHYSICAL_ID_MASK);
        if (valid)
                new_entry |= AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
        else
                new_entry &= ~AVIC_LOGICAL_ID_ENTRY_VALID_MASK;
        WRITE_ONCE(*entry, new_entry);

        return 0;
}

static int avic_handle_ldr_update(struct kvm_vcpu *vcpu)
{
        int ret;
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 ldr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_LDR);

        if (!ldr)
                return 1;

        ret = avic_ldr_write(vcpu, vcpu->vcpu_id, ldr, true);
        if (ret && svm->ldr_reg) {
                avic_ldr_write(vcpu, 0, svm->ldr_reg, false);
                svm->ldr_reg = 0;
        } else {
                svm->ldr_reg = ldr;
        }
        return ret;
}

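/*
 * A guest write to APIC_ID must move the vCPU's entry in the physical
 * APIC ID table, since AVIC looks vCPUs up by the guest-visible APIC ID
 * rather than by vcpu_id.
 */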
static int avic_handle_apic_id_update(struct kvm_vcpu *vcpu)
{
        u64 *old, *new;
        struct vcpu_svm *svm = to_svm(vcpu);
        u32 apic_id_reg = kvm_lapic_get_reg(vcpu->arch.apic, APIC_ID);
        u32 id = (apic_id_reg >> 24) & 0xff;

        if (vcpu->vcpu_id == id)
                return 0;

        old = avic_get_physical_id_entry(vcpu, vcpu->vcpu_id);
        new = avic_get_physical_id_entry(vcpu, id);
        if (!new || !old)
                return 1;

        /* We need to move the physical_id_entry to the new offset */
        *new = *old;
        *old = 0ULL;
        to_svm(vcpu)->avic_physical_id_cache = new;

        /*
         * Also update the guest physical APIC ID in the logical
         * APIC ID table entry if the LDR has already been set up.
         */
        if (svm->ldr_reg)
                avic_handle_ldr_update(vcpu);

        return 0;
}

static int avic_handle_dfr_update(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_arch *vm_data = &vcpu->kvm->arch;
        u32 dfr = kvm_lapic_get_reg(vcpu->arch.apic, APIC_DFR);
        u32 mod = (dfr >> 28) & 0xf;

        /*
         * We assume that all local APICs are using the same type.
         * If this changes, we need to flush the AVIC logical
         * APIC ID table.
         */
        if (vm_data->ldr_mode == mod)
                return 0;

        clear_page(page_address(vm_data->avic_logical_id_table_page));
        vm_data->ldr_mode = mod;

        if (svm->ldr_reg)
                avic_handle_ldr_update(vcpu);
        return 0;
}

static int avic_unaccel_trap_write(struct vcpu_svm *svm)
{
        struct kvm_lapic *apic = svm->vcpu.arch.apic;
        u32 offset = svm->vmcb->control.exit_info_1 &
                     AVIC_UNACCEL_ACCESS_OFFSET_MASK;

        switch (offset) {
        case APIC_ID:
                if (avic_handle_apic_id_update(&svm->vcpu))
                        return 0;
                break;
        case APIC_LDR:
                if (avic_handle_ldr_update(&svm->vcpu))
                        return 0;
                break;
        case APIC_DFR:
                avic_handle_dfr_update(&svm->vcpu);
                break;
        default:
                break;
        }

        kvm_lapic_reg_write(apic, offset, kvm_lapic_get_reg(apic, offset));

        return 1;
}

static bool is_avic_unaccelerated_access_trap(u32 offset)
{
        bool ret = false;

        switch (offset) {
        case APIC_ID:
        case APIC_EOI:
        case APIC_RRR:
        case APIC_LDR:
        case APIC_DFR:
        case APIC_SPIV:
        case APIC_ESR:
        case APIC_ICR:
        case APIC_LVTT:
        case APIC_LVTTHMR:
        case APIC_LVTPC:
        case APIC_LVT0:
        case APIC_LVT1:
        case APIC_LVTERR:
        case APIC_TMICT:
        case APIC_TDCR:
                ret = true;
                break;
        default:
                break;
        }
        return ret;
}

static int avic_unaccelerated_access_interception(struct vcpu_svm *svm)
{
        int ret = 0;
        u32 offset = svm->vmcb->control.exit_info_1 &
                     AVIC_UNACCEL_ACCESS_OFFSET_MASK;
        u32 vector = svm->vmcb->control.exit_info_2 &
                     AVIC_UNACCEL_ACCESS_VECTOR_MASK;
        bool write = (svm->vmcb->control.exit_info_1 >> 32) &
                     AVIC_UNACCEL_ACCESS_WRITE_MASK;
        bool trap = is_avic_unaccelerated_access_trap(offset);

        trace_kvm_avic_unaccelerated_access(svm->vcpu.vcpu_id, offset,
                                            trap, write, vector);
        if (trap) {
                /* Handling Trap */
                WARN_ONCE(!write, "svm: Handling trap read.\n");
                ret = avic_unaccel_trap_write(svm);
        } else {
                /* Handling Fault */
                ret = (emulate_instruction(&svm->vcpu, 0) == EMULATE_DONE);
        }

        return ret;
}

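/*
 * Exit-code dispatch table: handle_exit() indexes this array with the
 * VMCB exit code, so every intercept enabled elsewhere needs a handler
 * entry here.
 */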
static int (*const svm_exit_handlers[])(struct vcpu_svm *svm) = {
        [SVM_EXIT_READ_CR0]                     = cr_interception,
        [SVM_EXIT_READ_CR3]                     = cr_interception,
        [SVM_EXIT_READ_CR4]                     = cr_interception,
        [SVM_EXIT_READ_CR8]                     = cr_interception,
        [SVM_EXIT_CR0_SEL_WRITE]                = cr_interception,
        [SVM_EXIT_WRITE_CR0]                    = cr_interception,
        [SVM_EXIT_WRITE_CR3]                    = cr_interception,
        [SVM_EXIT_WRITE_CR4]                    = cr_interception,
        [SVM_EXIT_WRITE_CR8]                    = cr8_write_interception,
        [SVM_EXIT_READ_DR0]                     = dr_interception,
        [SVM_EXIT_READ_DR1]                     = dr_interception,
        [SVM_EXIT_READ_DR2]                     = dr_interception,
        [SVM_EXIT_READ_DR3]                     = dr_interception,
        [SVM_EXIT_READ_DR4]                     = dr_interception,
        [SVM_EXIT_READ_DR5]                     = dr_interception,
        [SVM_EXIT_READ_DR6]                     = dr_interception,
        [SVM_EXIT_READ_DR7]                     = dr_interception,
        [SVM_EXIT_WRITE_DR0]                    = dr_interception,
        [SVM_EXIT_WRITE_DR1]                    = dr_interception,
        [SVM_EXIT_WRITE_DR2]                    = dr_interception,
        [SVM_EXIT_WRITE_DR3]                    = dr_interception,
        [SVM_EXIT_WRITE_DR4]                    = dr_interception,
        [SVM_EXIT_WRITE_DR5]                    = dr_interception,
        [SVM_EXIT_WRITE_DR6]                    = dr_interception,
        [SVM_EXIT_WRITE_DR7]                    = dr_interception,
        [SVM_EXIT_EXCP_BASE + DB_VECTOR]        = db_interception,
        [SVM_EXIT_EXCP_BASE + BP_VECTOR]        = bp_interception,
        [SVM_EXIT_EXCP_BASE + UD_VECTOR]        = ud_interception,
        [SVM_EXIT_EXCP_BASE + PF_VECTOR]        = pf_interception,
        [SVM_EXIT_EXCP_BASE + MC_VECTOR]        = mc_interception,
        [SVM_EXIT_EXCP_BASE + AC_VECTOR]        = ac_interception,
        [SVM_EXIT_EXCP_BASE + GP_VECTOR]        = gp_interception,
        [SVM_EXIT_INTR]                         = intr_interception,
        [SVM_EXIT_NMI]                          = nmi_interception,
        [SVM_EXIT_SMI]                          = nop_on_interception,
        [SVM_EXIT_INIT]                         = nop_on_interception,
        [SVM_EXIT_VINTR]                        = interrupt_window_interception,
        [SVM_EXIT_RDPMC]                        = rdpmc_interception,
        [SVM_EXIT_CPUID]                        = cpuid_interception,
        [SVM_EXIT_IRET]                         = iret_interception,
        [SVM_EXIT_INVD]                         = emulate_on_interception,
        [SVM_EXIT_PAUSE]                        = pause_interception,
        [SVM_EXIT_HLT]                          = halt_interception,
        [SVM_EXIT_INVLPG]                       = invlpg_interception,
        [SVM_EXIT_INVLPGA]                      = invlpga_interception,
        [SVM_EXIT_IOIO]                         = io_interception,
        [SVM_EXIT_MSR]                          = msr_interception,
        [SVM_EXIT_TASK_SWITCH]                  = task_switch_interception,
        [SVM_EXIT_SHUTDOWN]                     = shutdown_interception,
        [SVM_EXIT_VMRUN]                        = vmrun_interception,
        [SVM_EXIT_VMMCALL]                      = vmmcall_interception,
        [SVM_EXIT_VMLOAD]                       = vmload_interception,
        [SVM_EXIT_VMSAVE]                       = vmsave_interception,
        [SVM_EXIT_STGI]                         = stgi_interception,
        [SVM_EXIT_CLGI]                         = clgi_interception,
        [SVM_EXIT_SKINIT]                       = skinit_interception,
        [SVM_EXIT_WBINVD]                       = wbinvd_interception,
        [SVM_EXIT_MONITOR]                      = monitor_interception,
        [SVM_EXIT_MWAIT]                        = mwait_interception,
        [SVM_EXIT_XSETBV]                       = xsetbv_interception,
        [SVM_EXIT_NPF]                          = npf_interception,
        [SVM_EXIT_RSM]                          = rsm_interception,
        [SVM_EXIT_AVIC_INCOMPLETE_IPI]          = avic_incomplete_ipi_interception,
        [SVM_EXIT_AVIC_UNACCELERATED_ACCESS]    = avic_unaccelerated_access_interception,
};

static void dump_vmcb(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb_control_area *control = &svm->vmcb->control;
        struct vmcb_save_area *save = &svm->vmcb->save;

        pr_err("VMCB Control Area:\n");
        pr_err("%-20s%04x\n", "cr_read:", control->intercept_cr & 0xffff);
        pr_err("%-20s%04x\n", "cr_write:", control->intercept_cr >> 16);
        pr_err("%-20s%04x\n", "dr_read:", control->intercept_dr & 0xffff);
        pr_err("%-20s%04x\n", "dr_write:", control->intercept_dr >> 16);
        pr_err("%-20s%08x\n", "exceptions:", control->intercept_exceptions);
        pr_err("%-20s%016llx\n", "intercepts:", control->intercept);
        pr_err("%-20s%d\n", "pause filter count:", control->pause_filter_count);
        pr_err("%-20s%016llx\n", "iopm_base_pa:", control->iopm_base_pa);
        pr_err("%-20s%016llx\n", "msrpm_base_pa:", control->msrpm_base_pa);
        pr_err("%-20s%016llx\n", "tsc_offset:", control->tsc_offset);
        pr_err("%-20s%d\n", "asid:", control->asid);
        pr_err("%-20s%d\n", "tlb_ctl:", control->tlb_ctl);
        pr_err("%-20s%08x\n", "int_ctl:", control->int_ctl);
        pr_err("%-20s%08x\n", "int_vector:", control->int_vector);
        pr_err("%-20s%08x\n", "int_state:", control->int_state);
        pr_err("%-20s%08x\n", "exit_code:", control->exit_code);
        pr_err("%-20s%016llx\n", "exit_info1:", control->exit_info_1);
        pr_err("%-20s%016llx\n", "exit_info2:", control->exit_info_2);
        pr_err("%-20s%08x\n", "exit_int_info:", control->exit_int_info);
        pr_err("%-20s%08x\n", "exit_int_info_err:", control->exit_int_info_err);
        pr_err("%-20s%lld\n", "nested_ctl:", control->nested_ctl);
        pr_err("%-20s%016llx\n", "nested_cr3:", control->nested_cr3);
        pr_err("%-20s%016llx\n", "avic_vapic_bar:", control->avic_vapic_bar);
        pr_err("%-20s%08x\n", "event_inj:", control->event_inj);
        pr_err("%-20s%08x\n", "event_inj_err:", control->event_inj_err);
        pr_err("%-20s%lld\n", "virt_ext:", control->virt_ext);
        pr_err("%-20s%016llx\n", "next_rip:", control->next_rip);
        pr_err("%-20s%016llx\n", "avic_backing_page:", control->avic_backing_page);
        pr_err("%-20s%016llx\n", "avic_logical_id:", control->avic_logical_id);
        pr_err("%-20s%016llx\n", "avic_physical_id:", control->avic_physical_id);
        pr_err("VMCB State Save Area:\n");
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "es:",
               save->es.selector, save->es.attrib,
               save->es.limit, save->es.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "cs:",
               save->cs.selector, save->cs.attrib,
               save->cs.limit, save->cs.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "ss:",
               save->ss.selector, save->ss.attrib,
               save->ss.limit, save->ss.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "ds:",
               save->ds.selector, save->ds.attrib,
               save->ds.limit, save->ds.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "fs:",
               save->fs.selector, save->fs.attrib,
               save->fs.limit, save->fs.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "gs:",
               save->gs.selector, save->gs.attrib,
               save->gs.limit, save->gs.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "gdtr:",
               save->gdtr.selector, save->gdtr.attrib,
               save->gdtr.limit, save->gdtr.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "ldtr:",
               save->ldtr.selector, save->ldtr.attrib,
               save->ldtr.limit, save->ldtr.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "idtr:",
               save->idtr.selector, save->idtr.attrib,
               save->idtr.limit, save->idtr.base);
        pr_err("%-5s s: %04x a: %04x l: %08x b: %016llx\n",
               "tr:",
               save->tr.selector, save->tr.attrib,
               save->tr.limit, save->tr.base);
        pr_err("cpl: %d efer: %016llx\n",
               save->cpl, save->efer);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "cr0:", save->cr0, "cr2:", save->cr2);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "cr3:", save->cr3, "cr4:", save->cr4);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "dr6:", save->dr6, "dr7:", save->dr7);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "rip:", save->rip, "rflags:", save->rflags);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "rsp:", save->rsp, "rax:", save->rax);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "star:", save->star, "lstar:", save->lstar);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "cstar:", save->cstar, "sfmask:", save->sfmask);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "kernel_gs_base:", save->kernel_gs_base,
               "sysenter_cs:", save->sysenter_cs);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "sysenter_esp:", save->sysenter_esp,
               "sysenter_eip:", save->sysenter_eip);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "gpat:", save->g_pat, "dbgctl:", save->dbgctl);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "br_from:", save->br_from, "br_to:", save->br_to);
        pr_err("%-15s %016llx %-13s %016llx\n",
               "excp_from:", save->last_excp_from,
               "excp_to:", save->last_excp_to);
}

static void svm_get_exit_info(struct kvm_vcpu *vcpu, u64 *info1, u64 *info2)
{
        struct vmcb_control_area *control = &to_svm(vcpu)->vmcb->control;

        *info1 = control->exit_info_1;
        *info2 = control->exit_info_2;
}

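/*
 * Top-level exit dispatch. For a nested guest the exit is first offered
 * to L1 via nested_svm_exit_handled(); only exits that L1 does not
 * intercept reach the handler table above.
 */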
static int handle_exit(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct kvm_run *kvm_run = vcpu->run;
        u32 exit_code = svm->vmcb->control.exit_code;

        trace_kvm_exit(exit_code, vcpu, KVM_ISA_SVM);

        if (!is_cr_intercept(svm, INTERCEPT_CR0_WRITE))
                vcpu->arch.cr0 = svm->vmcb->save.cr0;
        if (npt_enabled)
                vcpu->arch.cr3 = svm->vmcb->save.cr3;

        if (unlikely(svm->nested.exit_required)) {
                nested_svm_vmexit(svm);
                svm->nested.exit_required = false;

                return 1;
        }

        if (is_guest_mode(vcpu)) {
                int vmexit;

                trace_kvm_nested_vmexit(svm->vmcb->save.rip, exit_code,
                                        svm->vmcb->control.exit_info_1,
                                        svm->vmcb->control.exit_info_2,
                                        svm->vmcb->control.exit_int_info,
                                        svm->vmcb->control.exit_int_info_err,
                                        KVM_ISA_SVM);

                vmexit = nested_svm_exit_special(svm);

                if (vmexit == NESTED_EXIT_CONTINUE)
                        vmexit = nested_svm_exit_handled(svm);

                if (vmexit == NESTED_EXIT_DONE)
                        return 1;
        }

        svm_complete_interrupts(svm);

        if (svm->vmcb->control.exit_code == SVM_EXIT_ERR) {
                kvm_run->exit_reason = KVM_EXIT_FAIL_ENTRY;
                kvm_run->fail_entry.hardware_entry_failure_reason
                        = svm->vmcb->control.exit_code;
                pr_err("KVM: FAILED VMRUN WITH VMCB:\n");
                dump_vmcb(vcpu);
                return 0;
        }

        if (is_external_interrupt(svm->vmcb->control.exit_int_info) &&
            exit_code != SVM_EXIT_EXCP_BASE + PF_VECTOR &&
            exit_code != SVM_EXIT_NPF && exit_code != SVM_EXIT_TASK_SWITCH &&
            exit_code != SVM_EXIT_INTR && exit_code != SVM_EXIT_NMI)
                printk(KERN_ERR "%s: unexpected exit_int_info 0x%x "
                       "exit_code 0x%x\n",
                       __func__, svm->vmcb->control.exit_int_info,
                       exit_code);

        if (exit_code >= ARRAY_SIZE(svm_exit_handlers)
            || !svm_exit_handlers[exit_code]) {
                WARN_ONCE(1, "svm: unexpected exit reason 0x%x\n", exit_code);
                kvm_queue_exception(vcpu, UD_VECTOR);
                return 1;
        }

        return svm_exit_handlers[exit_code](svm);
}

static void reload_tss(struct kvm_vcpu *vcpu)
{
        int cpu = raw_smp_processor_id();

        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        sd->tss_desc->type = 9; /* available 32/64-bit TSS */
        load_TR_desc();
}

static void pre_sev_run(struct vcpu_svm *svm, int cpu)
{
        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);
        int asid = sev_get_asid(svm->vcpu.kvm);

        /* Assign the ASID allocated to this SEV guest */
        svm->vmcb->control.asid = asid;

        /*
         * Flush guest TLB:
         *
         * 1) when a different VMCB for the same ASID is to be run on the
         *    same host CPU, or
         * 2) when this VMCB was executed on a different host CPU in
         *    previous VMRUNs.
         */
        if (sd->sev_vmcbs[asid] == svm->vmcb &&
            svm->last_cpu == cpu)
                return;

        svm->last_cpu = cpu;
        sd->sev_vmcbs[asid] = svm->vmcb;
        svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
        mark_dirty(svm->vmcb, VMCB_ASID);
}

static void pre_svm_run(struct vcpu_svm *svm)
{
        int cpu = raw_smp_processor_id();

        struct svm_cpu_data *sd = per_cpu(svm_data, cpu);

        if (sev_guest(svm->vcpu.kvm))
                return pre_sev_run(svm, cpu);

        /* FIXME: handle wraparound of asid_generation */
        if (svm->asid_generation != sd->asid_generation)
                new_asid(svm, sd);
}

static void svm_inject_nmi(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        svm->vmcb->control.event_inj = SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_NMI;
        vcpu->arch.hflags |= HF_NMI_MASK;
        set_intercept(svm, INTERCEPT_IRET);
        ++vcpu->stat.nmi_injections;
}

static inline void svm_inject_irq(struct vcpu_svm *svm, int irq)
{
        struct vmcb_control_area *control;

        /* The following fields are ignored when AVIC is enabled */
        control = &svm->vmcb->control;
        control->int_vector = irq;
        control->int_ctl &= ~V_INTR_PRIO_MASK;
        control->int_ctl |= V_IRQ_MASK |
                ((/*control->int_vector >> 4*/ 0xf) << V_INTR_PRIO_SHIFT);
        mark_dirty(svm->vmcb, VMCB_INTR);
}

static void svm_set_irq(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        BUG_ON(!(gif_set(svm)));

        trace_kvm_inj_virq(vcpu->arch.interrupt.nr);
        ++vcpu->stat.irq_injections;

        svm->vmcb->control.event_inj = vcpu->arch.interrupt.nr |
                SVM_EVTINJ_VALID | SVM_EVTINJ_TYPE_INTR;
}

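/*
 * When L2 runs with V_INTR_MASKING, TPR/CR8 accesses are virtualized
 * through L1's vmcb, so the CR8 intercept handling and lAPIC
 * synchronization below must be left alone in that case.
 */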
static inline bool svm_nested_virtualize_tpr(struct kvm_vcpu *vcpu)
{
        return is_guest_mode(vcpu) && (vcpu->arch.hflags & HF_VINTR_MASK);
}

static void update_cr8_intercept(struct kvm_vcpu *vcpu, int tpr, int irr)
{
        struct vcpu_svm *svm = to_svm(vcpu);

        if (svm_nested_virtualize_tpr(vcpu) ||
            kvm_vcpu_apicv_active(vcpu))
                return;

        clr_cr_intercept(svm, INTERCEPT_CR8_WRITE);

        if (irr == -1)
                return;

        if (tpr >= irr)
                set_cr_intercept(svm, INTERCEPT_CR8_WRITE);
}

static void svm_set_virtual_x2apic_mode(struct kvm_vcpu *vcpu, bool set)
{
        return;
}

static bool svm_get_enable_apicv(struct kvm_vcpu *vcpu)
{
        return avic && irqchip_split(vcpu->kvm);
}

static void svm_hwapic_irr_update(struct kvm_vcpu *vcpu, int max_irr)
{
}

static void svm_hwapic_isr_update(struct kvm_vcpu *vcpu, int max_isr)
{
}

/* Note: Currently only used by Hyper-V. */
static void svm_refresh_apicv_exec_ctrl(struct kvm_vcpu *vcpu)
{
        struct vcpu_svm *svm = to_svm(vcpu);
        struct vmcb *vmcb = svm->vmcb;

        if (!kvm_vcpu_apicv_active(&svm->vcpu))
                return;

        vmcb->control.int_ctl &= ~AVIC_ENABLE_MASK;
        mark_dirty(vmcb, VMCB_INTR);
}

static void svm_load_eoi_exitmap(struct kvm_vcpu *vcpu, u64 *eoi_exit_bitmap)
{
        return;
}

static void svm_deliver_avic_intr(struct kvm_vcpu *vcpu, int vec)
{
        kvm_lapic_set_irr(vec, vcpu->arch.apic);
        smp_mb__after_atomic();

        if (avic_vcpu_is_running(vcpu))
                wrmsrl(SVM_AVIC_DOORBELL,
                       kvm_cpu_get_apicid(vcpu->cpu));
        else
                kvm_vcpu_wake_up(vcpu);
}

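/*
 * Posted-interrupt bookkeeping: each vCPU keeps an ir_list of IOMMU
 * interrupt-remapping entries that target it, so that vCPU scheduling
 * changes can update those IRTEs directly.
 */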
static void svm_ir_list_del(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
        unsigned long flags;
        struct amd_svm_iommu_ir *cur;

        spin_lock_irqsave(&svm->ir_list_lock, flags);
        list_for_each_entry(cur, &svm->ir_list, node) {
                if (cur->data != pi->ir_data)
                        continue;
                list_del(&cur->node);
                kfree(cur);
                break;
        }
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
}

static int svm_ir_list_add(struct vcpu_svm *svm, struct amd_iommu_pi_data *pi)
{
        int ret = 0;
        unsigned long flags;
        struct amd_svm_iommu_ir *ir;

        /**
         * In some cases, the existing IRTE is updated and re-set,
         * so we need to check here if it has already been added
         * to the ir_list.
         */
        if (pi->ir_data && (pi->prev_ga_tag != 0)) {
                struct kvm *kvm = svm->vcpu.kvm;
                u32 vcpu_id = AVIC_GATAG_TO_VCPUID(pi->prev_ga_tag);
                struct kvm_vcpu *prev_vcpu = kvm_get_vcpu_by_id(kvm, vcpu_id);
                struct vcpu_svm *prev_svm;

                if (!prev_vcpu) {
                        ret = -EINVAL;
                        goto out;
                }

                prev_svm = to_svm(prev_vcpu);
                svm_ir_list_del(prev_svm, pi);
        }

        /**
         * Allocate a new amd_svm_iommu_ir entry, which will be
         * added to the per-vcpu ir_list.
         */
        ir = kzalloc(sizeof(struct amd_svm_iommu_ir), GFP_KERNEL);
        if (!ir) {
                ret = -ENOMEM;
                goto out;
        }
        ir->data = pi->ir_data;

        spin_lock_irqsave(&svm->ir_list_lock, flags);
        list_add(&ir->node, &svm->ir_list);
        spin_unlock_irqrestore(&svm->ir_list_lock, flags);
out:
        return ret;
}

/**
 * Note:
 * The HW cannot support posting multicast/broadcast
 * interrupts to a vCPU. So, we still use legacy interrupt
 * remapping for these kinds of interrupts.
 *
 * For lowest-priority interrupts, we only support
 * those with a single CPU as the destination, e.g. user
 * configures the interrupts via /proc/irq or uses
 * irqbalance to make the interrupts single-CPU.
 */
static int
get_pi_vcpu_info(struct kvm *kvm, struct kvm_kernel_irq_routing_entry *e,
                 struct vcpu_data *vcpu_info, struct vcpu_svm **svm)
{
        struct kvm_lapic_irq irq;
        struct kvm_vcpu *vcpu = NULL;

        kvm_set_msi_irq(kvm, e, &irq);

        if (!kvm_intr_is_single_vcpu(kvm, &irq, &vcpu)) {
                pr_debug("SVM: %s: use legacy intr remap mode for irq %u\n",
                         __func__, irq.vector);
                return -1;
        }

        pr_debug("SVM: %s: use GA mode for irq %u\n", __func__,
                 irq.vector);
        *svm = to_svm(vcpu);
        vcpu_info->pi_desc_addr = __sme_set(page_to_phys((*svm)->avic_backing_page));
        vcpu_info->vector = irq.vector;

        return 0;
}

/*
 * svm_update_pi_irte - set IRTE for Posted-Interrupts
 *
 * @kvm: kvm
 * @host_irq: host irq of the interrupt
 * @guest_irq: gsi of the interrupt
 * @set: set or unset PI
 * returns 0 on success, < 0 on failure
 */
static int svm_update_pi_irte(struct kvm *kvm, unsigned int host_irq,
                              uint32_t guest_irq, bool set)
{
        struct kvm_kernel_irq_routing_entry *e;
        struct kvm_irq_routing_table *irq_rt;
        int idx, ret = -EINVAL;

        if (!kvm_arch_has_assigned_device(kvm) ||
            !irq_remapping_cap(IRQ_POSTING_CAP))
                return 0;

        pr_debug("SVM: %s: host_irq=%#x, guest_irq=%#x, set=%#x\n",
                 __func__, host_irq, guest_irq, set);

        idx = srcu_read_lock(&kvm->irq_srcu);
        irq_rt = srcu_dereference(kvm->irq_routing, &kvm->irq_srcu);
        WARN_ON(guest_irq >= irq_rt->nr_rt_entries);

        hlist_for_each_entry(e, &irq_rt->map[guest_irq], link) {
                struct vcpu_data vcpu_info;
                struct vcpu_svm *svm = NULL;

                if (e->type != KVM_IRQ_ROUTING_MSI)
                        continue;

                /**
                 * Here, we fall back to legacy mode in the following cases:
                 * 1. When the interrupt cannot be targeted at a specific vcpu.
                 * 2. Unsetting the posted interrupt.
                 * 3. APIC virtualization is disabled for the vcpu.
                 */
                if (!get_pi_vcpu_info(kvm, e, &vcpu_info, &svm) && set &&
                    kvm_vcpu_apicv_active(&svm->vcpu)) {
                        struct amd_iommu_pi_data pi;

                        /* Try to enable guest_mode in IRTE */
                        pi.base = __sme_set(page_to_phys(svm->avic_backing_page) &
                                            AVIC_HPA_MASK);
                        pi.ga_tag = AVIC_GATAG(kvm->arch.avic_vm_id,
                                               svm->vcpu.vcpu_id);
                        pi.is_guest_mode = true;
                        pi.vcpu_data = &vcpu_info;
                        ret = irq_set_vcpu_affinity(host_irq, &pi);

                        /**
                         * Here, we have successfully set up vcpu affinity in
                         * IOMMU guest mode. Now, we need to store the posted
                         * interrupt information in a per-vcpu ir_list so that
                         * we can reference it directly when we update the vcpu
                         * scheduling information in the IOMMU irte.
                         */
                        if (!ret && pi.is_guest_mode)
                                svm_ir_list_add(svm, &pi);
                } else {
                        /* Use legacy mode in IRTE */
                        struct amd_iommu_pi_data pi;

                        /**
                         * Here, pi is used to:
                         * - Tell IOMMU to use legacy mode for this interrupt.
                         * - Retrieve ga_tag of prior interrupt remapping data.
                         */
                        pi.is_guest_mode = false;
                        ret = irq_set_vcpu_affinity(host_irq, &pi);

                        /**
                         * Check if the posted interrupt was previously
                         * set up with guest_mode by checking if the ga_tag
                         * was cached. If so, we need to clean up the per-vcpu
                         * ir_list.
                         */
                        if (!ret && pi.prev_ga_tag) {
                                int id = AVIC_GATAG_TO_VCPUID(pi.prev_ga_tag);
                                struct kvm_vcpu *vcpu;

                                vcpu = kvm_get_vcpu_by_id(kvm, id);
                                if (vcpu)
                                        svm_ir_list_del(to_svm(vcpu), &pi);
                        }
                }

                if (!ret && svm) {
                        trace_kvm_pi_irte_update(svm->vcpu.vcpu_id,
                                                 host_irq, e->gsi,
                                                 vcpu_info.vector,
                                                 vcpu_info.pi_desc_addr, set);
                }

                if (ret < 0) {
                        pr_err("%s: failed to update PI IRTE\n", __func__);
                        goto out;
                }
        }

        ret = 0;
out:
        srcu_read_unlock(&kvm->irq_srcu, idx);
        return ret;
}

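/*
 * NMI injection is possible only with GIF set, no interrupt shadow
 * pending and no NMI currently being handled (HF_NMI_MASK); nested
 * configurations add their own check via nested_svm_nmi().
 */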
Gleb Natapov95ba8273132009-04-21 17:45:08 +03005160static int svm_nmi_allowed(struct kvm_vcpu *vcpu)
Joerg Roedelaaacfc92008-04-16 16:51:18 +02005161{
5162 struct vcpu_svm *svm = to_svm(vcpu);
5163 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel924584c2010-04-22 12:33:07 +02005164 int ret;
5165 ret = !(vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK) &&
5166 !(svm->vcpu.arch.hflags & HF_NMI_MASK);
5167 ret = ret && gif_set(svm) && nested_svm_nmi(svm);
5168
5169 return ret;
Joerg Roedelaaacfc92008-04-16 16:51:18 +02005170}
5171
Jan Kiszka3cfc3092009-11-12 01:04:25 +01005172static bool svm_get_nmi_mask(struct kvm_vcpu *vcpu)
5173{
5174 struct vcpu_svm *svm = to_svm(vcpu);
5175
5176 return !!(svm->vcpu.arch.hflags & HF_NMI_MASK);
5177}
5178
5179static void svm_set_nmi_mask(struct kvm_vcpu *vcpu, bool masked)
5180{
5181 struct vcpu_svm *svm = to_svm(vcpu);
5182
5183 if (masked) {
5184 svm->vcpu.arch.hflags |= HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01005185 set_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01005186 } else {
5187 svm->vcpu.arch.hflags &= ~HF_NMI_MASK;
Joerg Roedel8a05a1b82010-11-30 18:04:00 +01005188 clr_intercept(svm, INTERCEPT_IRET);
Jan Kiszka3cfc3092009-11-12 01:04:25 +01005189 }
5190}
5191
Gleb Natapov78646122009-03-23 12:12:11 +02005192static int svm_interrupt_allowed(struct kvm_vcpu *vcpu)
5193{
5194 struct vcpu_svm *svm = to_svm(vcpu);
5195 struct vmcb *vmcb = svm->vmcb;
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005196 int ret;
5197
5198 if (!gif_set(svm) ||
5199 (vmcb->control.int_state & SVM_INTERRUPT_SHADOW_MASK))
5200 return 0;
5201
Avi Kivityf6e78472010-08-02 15:30:20 +03005202 ret = !!(kvm_get_rflags(vcpu) & X86_EFLAGS_IF);
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005203
Joerg Roedel20307532010-11-29 17:51:48 +01005204 if (is_guest_mode(vcpu))
Joerg Roedel7fcdb512009-09-16 15:24:15 +02005205 return ret && !(svm->vcpu.arch.hflags & HF_VINTR_MASK);
5206
5207 return ret;
Gleb Natapov78646122009-03-23 12:12:11 +02005208}
5209
Jan Kiszkac9a79532014-03-07 20:03:15 +01005210static void enable_irq_window(struct kvm_vcpu *vcpu)
Gleb Natapov9222be12009-04-23 17:14:37 +03005211{
Alexander Graf219b65d2009-06-15 15:21:25 +02005212 struct vcpu_svm *svm = to_svm(vcpu);
Alexander Graf219b65d2009-06-15 15:21:25 +02005213
Suravee Suthikulpanit340d3bc2016-05-04 14:09:47 -05005214 if (kvm_vcpu_apicv_active(vcpu))
5215 return;
5216
Joerg Roedele0231712010-02-24 18:59:10 +01005217 /*
5218 * In case GIF=0 we can't rely on the CPU to tell us when GIF becomes
5219 * 1, because that's a separate STGI/VMRUN intercept. The next time we
5220	 * get that intercept, this function will be called again, and
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005221 * we'll get the vintr intercept. However, if the vGIF feature is
5222 * enabled, the STGI interception will not occur. Enable the irq
5223 * window under the assumption that the hardware will set the GIF.
Joerg Roedele0231712010-02-24 18:59:10 +01005224 */
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005225 if ((vgif_enabled(svm) || gif_set(svm)) && nested_svm_intr(svm)) {
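		/*
		 * Inject a dummy virtual interrupt (vector 0x0) solely to
		 * trigger a VINTR #VMEXIT once the guest is able to accept
		 * interrupts again.
		 */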
Alexander Graf219b65d2009-06-15 15:21:25 +02005226 svm_set_vintr(svm);
5227 svm_inject_irq(svm, 0x0);
5228 }
Gleb Natapov9222be12009-04-23 17:14:37 +03005229}
5230
Jan Kiszkac9a79532014-03-07 20:03:15 +01005231static void enable_nmi_window(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005232{
Avi Kivity04d2cc72007-09-10 18:10:54 +03005233 struct vcpu_svm *svm = to_svm(vcpu);
Eddie Dong85f455f2007-07-06 12:20:49 +03005234
Gleb Natapov44c11432009-05-11 13:35:52 +03005235 if ((svm->vcpu.arch.hflags & (HF_NMI_MASK | HF_IRET_MASK))
5236 == HF_NMI_MASK)
Jan Kiszkac9a79532014-03-07 20:03:15 +01005237 return; /* IRET will cause a vm exit */
Gleb Natapov44c11432009-05-11 13:35:52 +03005238
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005239 if (!gif_set(svm)) {
5240 if (vgif_enabled(svm))
5241 set_intercept(svm, INTERCEPT_STGI);
Ladi Prosek1a5e1852017-06-21 09:07:01 +02005242 return; /* STGI will cause a vm exit */
Janakarajan Natarajan640bd6e2017-08-23 09:57:19 -05005243 }
Ladi Prosek1a5e1852017-06-21 09:07:01 +02005244
5245 if (svm->nested.exit_required)
5246 return; /* we're not going to run the guest yet */
5247
Joerg Roedele0231712010-02-24 18:59:10 +01005248 /*
5249	 * Something prevents the NMI from being injected. Single-step over the
5250	 * possible problem (IRET, exception injection, or interrupt shadow).
5251 */
Ladi Prosekab2f4d732017-06-21 09:06:58 +02005252 svm->nmi_singlestep_guest_rflags = svm_get_rflags(vcpu);
Jan Kiszka6be7d302009-10-18 13:24:54 +02005253 svm->nmi_singlestep = true;
Gleb Natapov44c11432009-05-11 13:35:52 +03005254 svm->vmcb->save.rflags |= (X86_EFLAGS_TF | X86_EFLAGS_RF);
Eddie Dong85f455f2007-07-06 12:20:49 +03005255}
5256
Izik Eiduscbc94022007-10-25 00:29:55 +02005257static int svm_set_tss_addr(struct kvm *kvm, unsigned int addr)
5258{
5259 return 0;
5260}
5261
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08005262static void svm_flush_tlb(struct kvm_vcpu *vcpu, bool invalidate_gpa)
Avi Kivityd9e368d2007-06-07 19:18:30 +03005263{
Joerg Roedel38e5e922010-12-03 15:25:16 +01005264 struct vcpu_svm *svm = to_svm(vcpu);
5265
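	/*
	 * Prefer a targeted flush of this guest's ASID when the CPU
	 * supports FLUSHBYASID; otherwise retire the current ASID so
	 * pre_svm_run() assigns a fresh one before the next VMRUN.
	 */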
5266 if (static_cpu_has(X86_FEATURE_FLUSHBYASID))
5267 svm->vmcb->control.tlb_ctl = TLB_CONTROL_FLUSH_ASID;
5268 else
5269 svm->asid_generation--;
Avi Kivityd9e368d2007-06-07 19:18:30 +03005270}
5271
Avi Kivity04d2cc72007-09-10 18:10:54 +03005272static void svm_prepare_guest_switch(struct kvm_vcpu *vcpu)
5273{
5274}
5275
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005276static inline void sync_cr8_to_lapic(struct kvm_vcpu *vcpu)
5277{
5278 struct vcpu_svm *svm = to_svm(vcpu);
5279
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05005280 if (svm_nested_virtualize_tpr(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01005281 return;
5282
Roedel, Joerg4ee546b2010-12-03 10:50:51 +01005283 if (!is_cr_intercept(svm, INTERCEPT_CR8_WRITE)) {
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005284 int cr8 = svm->vmcb->control.int_ctl & V_TPR_MASK;
Gleb Natapov615d5192009-04-21 17:45:05 +03005285 kvm_set_cr8(vcpu, cr8);
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005286 }
5287}
5288
Joerg Roedel649d6862008-04-16 16:51:15 +02005289static inline void sync_lapic_to_cr8(struct kvm_vcpu *vcpu)
5290{
5291 struct vcpu_svm *svm = to_svm(vcpu);
5292 u64 cr8;
5293
Suravee Suthikulpanit3bbf3562016-05-04 14:09:51 -05005294 if (svm_nested_virtualize_tpr(vcpu) ||
5295 kvm_vcpu_apicv_active(vcpu))
Joerg Roedel88ab24a2010-02-19 16:23:06 +01005296 return;
5297
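	/*
	 * Mirror the local APIC TPR (CR8) into V_TPR so the hardware
	 * sees the current task priority while the guest runs.
	 */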
Joerg Roedel649d6862008-04-16 16:51:15 +02005298 cr8 = kvm_get_cr8(vcpu);
5299 svm->vmcb->control.int_ctl &= ~V_TPR_MASK;
5300 svm->vmcb->control.int_ctl |= cr8 & V_TPR_MASK;
5301}
5302
Gleb Natapov9222be12009-04-23 17:14:37 +03005303static void svm_complete_interrupts(struct vcpu_svm *svm)
5304{
5305 u8 vector;
5306 int type;
5307 u32 exitintinfo = svm->vmcb->control.exit_int_info;
Jan Kiszka66b71382010-02-23 17:47:56 +01005308 unsigned int3_injected = svm->int3_injected;
5309
5310 svm->int3_injected = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03005311
Avi Kivitybd3d1ec2011-02-03 15:29:52 +02005312 /*
5313 * If we've made progress since setting HF_IRET_MASK, we've
5314 * executed an IRET and can allow NMI injection.
5315 */
5316 if ((svm->vcpu.arch.hflags & HF_IRET_MASK)
5317 && kvm_rip_read(&svm->vcpu) != svm->nmi_iret_rip) {
Gleb Natapov44c11432009-05-11 13:35:52 +03005318 svm->vcpu.arch.hflags &= ~(HF_NMI_MASK | HF_IRET_MASK);
Avi Kivity3842d132010-07-27 12:30:24 +03005319 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5320 }
Gleb Natapov44c11432009-05-11 13:35:52 +03005321
Gleb Natapov9222be12009-04-23 17:14:37 +03005322 svm->vcpu.arch.nmi_injected = false;
5323 kvm_clear_exception_queue(&svm->vcpu);
5324 kvm_clear_interrupt_queue(&svm->vcpu);
5325
5326 if (!(exitintinfo & SVM_EXITINTINFO_VALID))
5327 return;
5328
Avi Kivity3842d132010-07-27 12:30:24 +03005329 kvm_make_request(KVM_REQ_EVENT, &svm->vcpu);
5330
Gleb Natapov9222be12009-04-23 17:14:37 +03005331 vector = exitintinfo & SVM_EXITINTINFO_VEC_MASK;
5332 type = exitintinfo & SVM_EXITINTINFO_TYPE_MASK;
5333
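	/*
	 * EXITINTINFO describes an event whose delivery was cut short
	 * by the #VMEXIT; requeue it by type so it is reinjected on the
	 * next VMRUN.
	 */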
5334 switch (type) {
5335 case SVM_EXITINTINFO_TYPE_NMI:
5336 svm->vcpu.arch.nmi_injected = true;
5337 break;
5338 case SVM_EXITINTINFO_TYPE_EXEPT:
Jan Kiszka66b71382010-02-23 17:47:56 +01005339 /*
5340 * In case of software exceptions, do not reinject the vector,
5341 * but re-execute the instruction instead. Rewind RIP first
5342 * if we emulated INT3 before.
5343 */
5344 if (kvm_exception_is_soft(vector)) {
5345 if (vector == BP_VECTOR && int3_injected &&
5346 kvm_is_linear_rip(&svm->vcpu, svm->int3_rip))
5347 kvm_rip_write(&svm->vcpu,
5348 kvm_rip_read(&svm->vcpu) -
5349 int3_injected);
Alexander Graf219b65d2009-06-15 15:21:25 +02005350 break;
Jan Kiszka66b71382010-02-23 17:47:56 +01005351 }
Gleb Natapov9222be12009-04-23 17:14:37 +03005352 if (exitintinfo & SVM_EXITINTINFO_VALID_ERR) {
5353 u32 err = svm->vmcb->control.exit_int_info_err;
Joerg Roedelce7ddec2010-04-22 12:33:13 +02005354 kvm_requeue_exception_e(&svm->vcpu, vector, err);
Gleb Natapov9222be12009-04-23 17:14:37 +03005355
5356 } else
Joerg Roedelce7ddec2010-04-22 12:33:13 +02005357 kvm_requeue_exception(&svm->vcpu, vector);
Gleb Natapov9222be12009-04-23 17:14:37 +03005358 break;
5359 case SVM_EXITINTINFO_TYPE_INTR:
Gleb Natapov66fd3f72009-05-11 13:35:50 +03005360 kvm_queue_interrupt(&svm->vcpu, vector, false);
Gleb Natapov9222be12009-04-23 17:14:37 +03005361 break;
5362 default:
5363 break;
5364 }
5365}
5366
Avi Kivityb463a6f2010-07-20 15:06:17 +03005367static void svm_cancel_injection(struct kvm_vcpu *vcpu)
5368{
5369 struct vcpu_svm *svm = to_svm(vcpu);
5370 struct vmcb_control_area *control = &svm->vmcb->control;
5371
5372 control->exit_int_info = control->event_inj;
5373 control->exit_int_info_err = control->event_inj_err;
5374 control->event_inj = 0;
5375 svm_complete_interrupts(svm);
5376}
5377
Avi Kivity851ba692009-08-24 11:10:17 +03005378static void svm_vcpu_run(struct kvm_vcpu *vcpu)
Avi Kivity6aa8b732006-12-10 02:21:36 -08005379{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005380 struct vcpu_svm *svm = to_svm(vcpu);
Avi Kivityd9e368d2007-06-07 19:18:30 +03005381
Joerg Roedel2041a062010-04-22 12:33:08 +02005382 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5383 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5384 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5385
Joerg Roedelcd3ff652009-10-09 16:08:26 +02005386 /*
5387 * A vmexit emulation is required before the vcpu can be executed
5388 * again.
5389 */
5390 if (unlikely(svm->nested.exit_required))
5391 return;
5392
Ladi Proseka12713c2017-06-21 09:07:00 +02005393 /*
5394 * Disable singlestep if we're injecting an interrupt/exception.
5395 * We don't want our modified rflags to be pushed on the stack where
5396 * we might not be able to easily reset them if we disabled NMI
5397 * singlestep later.
5398 */
5399 if (svm->nmi_singlestep && svm->vmcb->control.event_inj) {
5400 /*
5401 * Event injection happens before external interrupts cause a
5402 * vmexit and interrupts are disabled here, so smp_send_reschedule
5403 * is enough to force an immediate vmexit.
5404 */
5405 disable_nmi_singlestep(svm);
5406 smp_send_reschedule(vcpu->cpu);
5407 }
5408
Rusty Russelle756fc62007-07-30 20:07:08 +10005409 pre_svm_run(svm);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005410
Joerg Roedel649d6862008-04-16 16:51:15 +02005411 sync_lapic_to_cr8(vcpu);
5412
Joerg Roedelcda0ffd2009-08-07 11:49:45 +02005413 svm->vmcb->save.cr2 = vcpu->arch.cr2;
Avi Kivity6aa8b732006-12-10 02:21:36 -08005414
Avi Kivity04d2cc72007-09-10 18:10:54 +03005415 clgi();
5416
5417 local_irq_enable();
Avi Kivity36241b82006-12-22 01:05:20 -08005418
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01005419 /*
5420 * If this vCPU has touched SPEC_CTRL, restore the guest's value if
5421 * it's non-zero. Since vmentry is serialising on affected CPUs, there
5422 * is no need to worry about the conditional branch over the wrmsr
5423 * being speculatively taken.
5424 */
5425 if (svm->spec_ctrl)
Paolo Bonziniecb586b2018-02-22 16:43:17 +01005426 native_wrmsrl(MSR_IA32_SPEC_CTRL, svm->spec_ctrl);
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01005427
Avi Kivity6aa8b732006-12-10 02:21:36 -08005428 asm volatile (
Avi Kivity74547662012-09-16 15:10:59 +03005429 "push %%" _ASM_BP "; \n\t"
5430 "mov %c[rbx](%[svm]), %%" _ASM_BX " \n\t"
5431 "mov %c[rcx](%[svm]), %%" _ASM_CX " \n\t"
5432 "mov %c[rdx](%[svm]), %%" _ASM_DX " \n\t"
5433 "mov %c[rsi](%[svm]), %%" _ASM_SI " \n\t"
5434 "mov %c[rdi](%[svm]), %%" _ASM_DI " \n\t"
5435 "mov %c[rbp](%[svm]), %%" _ASM_BP " \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005436#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005437 "mov %c[r8](%[svm]), %%r8 \n\t"
5438 "mov %c[r9](%[svm]), %%r9 \n\t"
5439 "mov %c[r10](%[svm]), %%r10 \n\t"
5440 "mov %c[r11](%[svm]), %%r11 \n\t"
5441 "mov %c[r12](%[svm]), %%r12 \n\t"
5442 "mov %c[r13](%[svm]), %%r13 \n\t"
5443 "mov %c[r14](%[svm]), %%r14 \n\t"
5444 "mov %c[r15](%[svm]), %%r15 \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005445#endif
5446
Avi Kivity6aa8b732006-12-10 02:21:36 -08005447 /* Enter guest mode */
Avi Kivity74547662012-09-16 15:10:59 +03005448 "push %%" _ASM_AX " \n\t"
5449 "mov %c[vmcb](%[svm]), %%" _ASM_AX " \n\t"
Avi Kivity4ecac3f2008-05-13 13:23:38 +03005450 __ex(SVM_VMLOAD) "\n\t"
5451 __ex(SVM_VMRUN) "\n\t"
5452 __ex(SVM_VMSAVE) "\n\t"
Avi Kivity74547662012-09-16 15:10:59 +03005453 "pop %%" _ASM_AX " \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005454
5455 /* Save guest registers, load host registers */
Avi Kivity74547662012-09-16 15:10:59 +03005456 "mov %%" _ASM_BX ", %c[rbx](%[svm]) \n\t"
5457 "mov %%" _ASM_CX ", %c[rcx](%[svm]) \n\t"
5458 "mov %%" _ASM_DX ", %c[rdx](%[svm]) \n\t"
5459 "mov %%" _ASM_SI ", %c[rsi](%[svm]) \n\t"
5460 "mov %%" _ASM_DI ", %c[rdi](%[svm]) \n\t"
5461 "mov %%" _ASM_BP ", %c[rbp](%[svm]) \n\t"
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005462#ifdef CONFIG_X86_64
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005463 "mov %%r8, %c[r8](%[svm]) \n\t"
5464 "mov %%r9, %c[r9](%[svm]) \n\t"
5465 "mov %%r10, %c[r10](%[svm]) \n\t"
5466 "mov %%r11, %c[r11](%[svm]) \n\t"
5467 "mov %%r12, %c[r12](%[svm]) \n\t"
5468 "mov %%r13, %c[r13](%[svm]) \n\t"
5469 "mov %%r14, %c[r14](%[svm]) \n\t"
5470 "mov %%r15, %c[r15](%[svm]) \n\t"
Avi Kivity6aa8b732006-12-10 02:21:36 -08005471#endif
Jim Mattson0cb5b302018-01-03 14:31:38 -08005472 /*
5473 * Clear host registers marked as clobbered to prevent
5474 * speculative use.
5475 */
5476 "xor %%" _ASM_BX ", %%" _ASM_BX " \n\t"
5477 "xor %%" _ASM_CX ", %%" _ASM_CX " \n\t"
5478 "xor %%" _ASM_DX ", %%" _ASM_DX " \n\t"
5479 "xor %%" _ASM_SI ", %%" _ASM_SI " \n\t"
5480 "xor %%" _ASM_DI ", %%" _ASM_DI " \n\t"
5481#ifdef CONFIG_X86_64
5482 "xor %%r8, %%r8 \n\t"
5483 "xor %%r9, %%r9 \n\t"
5484 "xor %%r10, %%r10 \n\t"
5485 "xor %%r11, %%r11 \n\t"
5486 "xor %%r12, %%r12 \n\t"
5487 "xor %%r13, %%r13 \n\t"
5488 "xor %%r14, %%r14 \n\t"
5489 "xor %%r15, %%r15 \n\t"
5490#endif
Avi Kivity74547662012-09-16 15:10:59 +03005491 "pop %%" _ASM_BP
Avi Kivity6aa8b732006-12-10 02:21:36 -08005492 :
Rusty Russellfb3f0f52007-07-27 17:16:56 +10005493 : [svm]"a"(svm),
Avi Kivity6aa8b732006-12-10 02:21:36 -08005494 [vmcb]"i"(offsetof(struct vcpu_svm, vmcb_pa)),
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005495 [rbx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBX])),
5496 [rcx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RCX])),
5497 [rdx]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDX])),
5498 [rsi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RSI])),
5499 [rdi]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RDI])),
5500 [rbp]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_RBP]))
Avi Kivity05b3e0c2006-12-13 00:33:45 -08005501#ifdef CONFIG_X86_64
Zhang Xiantaoad312c72007-12-13 23:50:52 +08005502 , [r8]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R8])),
5503 [r9]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R9])),
5504 [r10]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R10])),
5505 [r11]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R11])),
5506 [r12]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R12])),
5507 [r13]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R13])),
5508 [r14]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R14])),
5509 [r15]"i"(offsetof(struct vcpu_svm, vcpu.arch.regs[VCPU_REGS_R15]))
Avi Kivity6aa8b732006-12-10 02:21:36 -08005510#endif
Laurent Vivier54a08c02007-10-25 14:18:53 +02005511 : "cc", "memory"
5512#ifdef CONFIG_X86_64
Avi Kivity74547662012-09-16 15:10:59 +03005513 , "rbx", "rcx", "rdx", "rsi", "rdi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02005514 , "r8", "r9", "r10", "r11" , "r12", "r13", "r14", "r15"
Avi Kivity74547662012-09-16 15:10:59 +03005515#else
5516 , "ebx", "ecx", "edx", "esi", "edi"
Laurent Vivier54a08c02007-10-25 14:18:53 +02005517#endif
5518 );
Avi Kivity6aa8b732006-12-10 02:21:36 -08005519
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01005520 /*
5521 * We do not use IBRS in the kernel. If this vCPU has used the
5522 * SPEC_CTRL MSR it may have left it on; save the value and
5523 * turn it off. This is much more efficient than blindly adding
5524 * it to the atomic save/restore list. Especially as the former
5525 * (Saving guest MSRs on vmexit) doesn't even exist in KVM.
5526 *
5527 * For non-nested case:
5528 * If the L01 MSR bitmap does not intercept the MSR, then we need to
5529 * save it.
5530 *
5531 * For nested case:
5532 * If the L02 MSR bitmap does not intercept the MSR, then we need to
5533 * save it.
5534 */
Paolo Bonzini946fbbc2018-02-22 16:43:18 +01005535 if (unlikely(!msr_write_intercepted(vcpu, MSR_IA32_SPEC_CTRL)))
Paolo Bonziniecb586b2018-02-22 16:43:17 +01005536 svm->spec_ctrl = native_read_msr(MSR_IA32_SPEC_CTRL);
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01005537
5538 if (svm->spec_ctrl)
Paolo Bonziniecb586b2018-02-22 16:43:17 +01005539 native_wrmsrl(MSR_IA32_SPEC_CTRL, 0);
KarimAllah Ahmedb2ac58f2018-02-03 15:56:23 +01005540
David Woodhouse117cc7a2018-01-12 11:11:27 +00005541 /* Eliminate branch target predictions from guest mode */
5542 vmexit_fill_RSB();
5543
Avi Kivity82ca2d12010-10-21 12:20:34 +02005544#ifdef CONFIG_X86_64
5545 wrmsrl(MSR_GS_BASE, svm->host.gs_base);
5546#else
Avi Kivitydacccfd2010-10-21 12:20:33 +02005547 loadsegment(fs, svm->host.fs);
Avi Kivity831ca602011-03-08 16:09:51 +02005548#ifndef CONFIG_X86_32_LAZY_GS
5549 loadsegment(gs, svm->host.gs);
5550#endif
Avi Kivity9581d442010-10-19 16:46:55 +02005551#endif
Avi Kivity6aa8b732006-12-10 02:21:36 -08005552
5553 reload_tss(vcpu);
5554
Avi Kivity56ba47d2007-11-07 17:14:18 +02005555 local_irq_disable();
5556
Avi Kivity13c34e02010-10-21 12:20:31 +02005557 vcpu->arch.cr2 = svm->vmcb->save.cr2;
5558 vcpu->arch.regs[VCPU_REGS_RAX] = svm->vmcb->save.rax;
5559 vcpu->arch.regs[VCPU_REGS_RSP] = svm->vmcb->save.rsp;
5560 vcpu->arch.regs[VCPU_REGS_RIP] = svm->vmcb->save.rip;
5561
Joerg Roedel3781c012011-01-14 16:45:02 +01005562 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5563 kvm_before_handle_nmi(&svm->vcpu);
5564
5565 stgi();
5566
5567 /* Any pending NMI will happen here */
5568
5569 if (unlikely(svm->vmcb->control.exit_code == SVM_EXIT_NMI))
5570 kvm_after_handle_nmi(&svm->vcpu);
5571
Joerg Roedeld7bf8222008-04-16 16:51:17 +02005572 sync_cr8_to_lapic(vcpu);
5573
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005574 svm->next_rip = 0;
Gleb Natapov9222be12009-04-23 17:14:37 +03005575
Joerg Roedel38e5e922010-12-03 15:25:16 +01005576 svm->vmcb->control.tlb_ctl = TLB_CONTROL_DO_NOTHING;
5577
Gleb Natapov631bc482010-10-14 11:22:52 +02005578	/* if the exit was due to a #PF, check for an async PF */
5579 if (svm->vmcb->control.exit_code == SVM_EXIT_EXCP_BASE + PF_VECTOR)
Wanpeng Li1261bfa2017-07-13 18:30:40 -07005580 svm->vcpu.arch.apf.host_apf_reason = kvm_read_and_reset_pf_reason();
Gleb Natapov631bc482010-10-14 11:22:52 +02005581
Avi Kivity6de4f3a2009-05-31 22:58:47 +03005582 if (npt_enabled) {
5583 vcpu->arch.regs_avail &= ~(1 << VCPU_EXREG_PDPTR);
5584 vcpu->arch.regs_dirty &= ~(1 << VCPU_EXREG_PDPTR);
5585 }
Joerg Roedelfe5913e2010-05-17 14:43:34 +02005586
5587 /*
5588 * We need to handle MC intercepts here before the vcpu has a chance to
5589	 * change the physical cpu.
5590 */
5591 if (unlikely(svm->vmcb->control.exit_code ==
5592 SVM_EXIT_EXCP_BASE + MC_VECTOR))
5593 svm_handle_mce(svm);
Roedel, Joerg8d28fec2010-12-03 13:15:21 +01005594
5595 mark_all_clean(svm->vmcb);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005596}
Josh Poimboeufc207aee2017-06-28 10:11:06 -05005597STACK_FRAME_NON_STANDARD(svm_vcpu_run);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005598
Avi Kivity6aa8b732006-12-10 02:21:36 -08005599static void svm_set_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5600{
Gregory Haskinsa2fa3e92007-07-27 08:13:10 -04005601 struct vcpu_svm *svm = to_svm(vcpu);
5602
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05005603 svm->vmcb->save.cr3 = __sme_set(root);
Joerg Roedeldcca1a62010-12-03 11:45:54 +01005604 mark_dirty(svm->vmcb, VMCB_CR);
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08005605 svm_flush_tlb(vcpu, true);
Avi Kivity6aa8b732006-12-10 02:21:36 -08005606}
5607
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005608static void set_tdp_cr3(struct kvm_vcpu *vcpu, unsigned long root)
5609{
5610 struct vcpu_svm *svm = to_svm(vcpu);
5611
Tom Lendackyd0ec49d2017-07-17 16:10:27 -05005612 svm->vmcb->control.nested_cr3 = __sme_set(root);
Joerg Roedelb2747162010-12-03 11:45:53 +01005613 mark_dirty(svm->vmcb, VMCB_NPT);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005614
5615 /* Also sync guest cr3 here in case we live migrate */
Avi Kivity9f8fe502010-12-05 17:30:00 +02005616 svm->vmcb->save.cr3 = kvm_read_cr3(vcpu);
Joerg Roedeldcca1a62010-12-03 11:45:54 +01005617 mark_dirty(svm->vmcb, VMCB_CR);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005618
Wanpeng Lic2ba05c2017-12-12 17:33:03 -08005619 svm_flush_tlb(vcpu, true);
Joerg Roedel1c97f0a2010-09-10 17:30:41 +02005620}
5621
Avi Kivity6aa8b732006-12-10 02:21:36 -08005622static int is_disabled(void)
5623{
Joerg Roedel6031a612007-06-22 12:29:50 +03005624 u64 vm_cr;
5625
5626 rdmsrl(MSR_VM_CR, vm_cr);
5627 if (vm_cr & (1 << SVM_VM_CR_SVM_DISABLE))
5628 return 1;
5629
Avi Kivity6aa8b732006-12-10 02:21:36 -08005630 return 0;
5631}
5632
Ingo Molnar102d8322007-02-19 14:37:47 +02005633static void
5634svm_patch_hypercall(struct kvm_vcpu *vcpu, unsigned char *hypercall)
5635{
5636 /*
5637 * Patch in the VMMCALL instruction:
5638 */
5639 hypercall[0] = 0x0f;
5640 hypercall[1] = 0x01;
5641 hypercall[2] = 0xd9;
Ingo Molnar102d8322007-02-19 14:37:47 +02005642}
5643
Yang, Sheng002c7f72007-07-31 14:23:01 +03005644static void svm_check_processor_compat(void *rtn)
5645{
5646 *(int *)rtn = 0;
5647}
5648
Avi Kivity774ead32007-12-26 13:57:04 +02005649static bool svm_cpu_has_accelerated_tpr(void)
5650{
5651 return false;
5652}
5653
Paolo Bonzini6d396b52015-04-01 14:25:33 +02005654static bool svm_has_high_real_mode_segbase(void)
5655{
5656 return true;
5657}
5658
Paolo Bonzinifc07e762015-10-01 13:20:22 +02005659static u64 svm_get_mt_mask(struct kvm_vcpu *vcpu, gfn_t gfn, bool is_mmio)
5660{
5661 return 0;
5662}
5663
Sheng Yang0e851882009-12-18 16:48:46 +08005664static void svm_cpuid_update(struct kvm_vcpu *vcpu)
5665{
Joerg Roedel6092d3d2015-10-14 15:10:54 +02005666 struct vcpu_svm *svm = to_svm(vcpu);
5667
5668 /* Update nrips enabled cache */
Radim Krčmářd6321d42017-08-05 00:12:49 +02005669 svm->nrips_enabled = !!guest_cpuid_has(&svm->vcpu, X86_FEATURE_NRIPS);
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05005670
5671 if (!kvm_vcpu_apicv_active(vcpu))
5672 return;
5673
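	/* AVIC does not work with x2APIC mode, so hide x2APIC from the guest. */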
Radim Krčmář1b4d56b2017-08-05 00:12:50 +02005674 guest_cpuid_clear(vcpu, X86_FEATURE_X2APIC);
Sheng Yang0e851882009-12-18 16:48:46 +08005675}
5676
Joerg Roedeld4330ef2010-04-22 12:33:11 +02005677static void svm_set_supported_cpuid(u32 func, struct kvm_cpuid_entry2 *entry)
5678{
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005679 switch (func) {
Suravee Suthikulpanit46781ea2016-05-04 14:09:50 -05005680 case 0x1:
5681 if (avic)
5682 entry->ecx &= ~bit(X86_FEATURE_X2APIC);
5683 break;
Joerg Roedel4c62a2d2010-09-10 17:31:06 +02005684 case 0x80000001:
5685 if (nested)
5686 entry->ecx |= (1 << 2); /* Set SVM bit */
5687 break;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005688 case 0x8000000A:
5689 entry->eax = 1; /* SVM revision 1 */
5690		entry->ebx = 8; /* Let's support 8 ASIDs in case we add proper
5691 ASID emulation to nested SVM */
5692 entry->ecx = 0; /* Reserved */
Joerg Roedel7a190662010-07-27 18:14:21 +02005693		entry->edx = 0; /* By default do not support any
5694 additional features */
5695
5696 /* Support next_rip if host supports it */
Avi Kivity2a6b20b2010-11-09 16:15:42 +02005697 if (boot_cpu_has(X86_FEATURE_NRIPS))
Joerg Roedel7a190662010-07-27 18:14:21 +02005698 entry->edx |= SVM_FEATURE_NRIP;
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005699
Joerg Roedel3d4aeaa2010-09-10 17:31:05 +02005700 /* Support NPT for the guest if enabled */
5701 if (npt_enabled)
5702 entry->edx |= SVM_FEATURE_NPT;
5703
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005704 break;
Brijesh Singh8765d752017-12-04 10:57:25 -06005705 case 0x8000001F:
5706 /* Support memory encryption cpuid if host supports it */
5707 if (boot_cpu_has(X86_FEATURE_SEV))
5708 cpuid(0x8000001f, &entry->eax, &entry->ebx,
5709 &entry->ecx, &entry->edx);
5710
Joerg Roedelc2c63a42010-04-22 12:33:12 +02005711 }
Joerg Roedeld4330ef2010-04-22 12:33:11 +02005712}
5713
Sheng Yang17cc3932010-01-05 19:02:27 +08005714static int svm_get_lpage_level(void)
Joerg Roedel344f4142009-07-27 16:30:48 +02005715{
Sheng Yang17cc3932010-01-05 19:02:27 +08005716 return PT_PDPE_LEVEL;
Joerg Roedel344f4142009-07-27 16:30:48 +02005717}
5718
Sheng Yang4e47c7a2009-12-18 16:48:47 +08005719static bool svm_rdtscp_supported(void)
5720{
Paolo Bonzini46896c72015-11-12 14:49:16 +01005721 return boot_cpu_has(X86_FEATURE_RDTSCP);
Sheng Yang4e47c7a2009-12-18 16:48:47 +08005722}
5723
Mao, Junjiead756a12012-07-02 01:18:48 +00005724static bool svm_invpcid_supported(void)
5725{
5726 return false;
5727}
5728
Paolo Bonzini93c4adc2014-03-05 23:19:52 +01005729static bool svm_mpx_supported(void)
5730{
5731 return false;
5732}
5733
Wanpeng Li55412b22014-12-02 19:21:30 +08005734static bool svm_xsaves_supported(void)
5735{
5736 return false;
5737}
5738
Paolo Bonzini66336ca2016-07-12 10:36:41 +02005739static bool svm_umip_emulated(void)
5740{
5741 return false;
5742}
5743
Sheng Yangf5f48ee2010-06-30 12:25:15 +08005744static bool svm_has_wbinvd_exit(void)
5745{
5746 return true;
5747}
5748
Joerg Roedel80612522011-04-04 12:39:33 +02005749#define PRE_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005750 .stage = X86_ICPT_PRE_EXCEPT, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005751#define POST_EX(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005752 .stage = X86_ICPT_POST_EXCEPT, }
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005753#define POST_MEM(exit) { .exit_code = (exit), \
Avi Kivity40e19b52011-04-21 12:35:41 +03005754 .stage = X86_ICPT_POST_MEMACCESS, }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005755
Mathias Krause09941fb2012-08-30 01:30:20 +02005756static const struct __x86_intercept {
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005757 u32 exit_code;
5758 enum x86_intercept_stage stage;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005759} x86_intercept_map[] = {
5760 [x86_intercept_cr_read] = POST_EX(SVM_EXIT_READ_CR0),
5761 [x86_intercept_cr_write] = POST_EX(SVM_EXIT_WRITE_CR0),
5762 [x86_intercept_clts] = POST_EX(SVM_EXIT_WRITE_CR0),
5763 [x86_intercept_lmsw] = POST_EX(SVM_EXIT_WRITE_CR0),
5764 [x86_intercept_smsw] = POST_EX(SVM_EXIT_READ_CR0),
Joerg Roedel3b88e412011-04-04 12:39:29 +02005765 [x86_intercept_dr_read] = POST_EX(SVM_EXIT_READ_DR0),
5766 [x86_intercept_dr_write] = POST_EX(SVM_EXIT_WRITE_DR0),
Joerg Roedeldee6bb72011-04-04 12:39:30 +02005767 [x86_intercept_sldt] = POST_EX(SVM_EXIT_LDTR_READ),
5768 [x86_intercept_str] = POST_EX(SVM_EXIT_TR_READ),
5769 [x86_intercept_lldt] = POST_EX(SVM_EXIT_LDTR_WRITE),
5770 [x86_intercept_ltr] = POST_EX(SVM_EXIT_TR_WRITE),
5771 [x86_intercept_sgdt] = POST_EX(SVM_EXIT_GDTR_READ),
5772 [x86_intercept_sidt] = POST_EX(SVM_EXIT_IDTR_READ),
5773 [x86_intercept_lgdt] = POST_EX(SVM_EXIT_GDTR_WRITE),
5774 [x86_intercept_lidt] = POST_EX(SVM_EXIT_IDTR_WRITE),
Joerg Roedel01de8b02011-04-04 12:39:31 +02005775 [x86_intercept_vmrun] = POST_EX(SVM_EXIT_VMRUN),
5776 [x86_intercept_vmmcall] = POST_EX(SVM_EXIT_VMMCALL),
5777 [x86_intercept_vmload] = POST_EX(SVM_EXIT_VMLOAD),
5778 [x86_intercept_vmsave] = POST_EX(SVM_EXIT_VMSAVE),
5779 [x86_intercept_stgi] = POST_EX(SVM_EXIT_STGI),
5780 [x86_intercept_clgi] = POST_EX(SVM_EXIT_CLGI),
5781 [x86_intercept_skinit] = POST_EX(SVM_EXIT_SKINIT),
5782 [x86_intercept_invlpga] = POST_EX(SVM_EXIT_INVLPGA),
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005783 [x86_intercept_rdtscp] = POST_EX(SVM_EXIT_RDTSCP),
5784 [x86_intercept_monitor] = POST_MEM(SVM_EXIT_MONITOR),
5785 [x86_intercept_mwait] = POST_EX(SVM_EXIT_MWAIT),
Joerg Roedel80612522011-04-04 12:39:33 +02005786 [x86_intercept_invlpg] = POST_EX(SVM_EXIT_INVLPG),
5787 [x86_intercept_invd] = POST_EX(SVM_EXIT_INVD),
5788 [x86_intercept_wbinvd] = POST_EX(SVM_EXIT_WBINVD),
5789 [x86_intercept_wrmsr] = POST_EX(SVM_EXIT_MSR),
5790 [x86_intercept_rdtsc] = POST_EX(SVM_EXIT_RDTSC),
5791 [x86_intercept_rdmsr] = POST_EX(SVM_EXIT_MSR),
5792 [x86_intercept_rdpmc] = POST_EX(SVM_EXIT_RDPMC),
5793 [x86_intercept_cpuid] = PRE_EX(SVM_EXIT_CPUID),
5794 [x86_intercept_rsm] = PRE_EX(SVM_EXIT_RSM),
Joerg Roedelbf608f82011-04-04 12:39:34 +02005795 [x86_intercept_pause] = PRE_EX(SVM_EXIT_PAUSE),
5796 [x86_intercept_pushf] = PRE_EX(SVM_EXIT_PUSHF),
5797 [x86_intercept_popf] = PRE_EX(SVM_EXIT_POPF),
5798 [x86_intercept_intn] = PRE_EX(SVM_EXIT_SWINT),
5799 [x86_intercept_iret] = PRE_EX(SVM_EXIT_IRET),
5800 [x86_intercept_icebp] = PRE_EX(SVM_EXIT_ICEBP),
5801 [x86_intercept_hlt] = POST_EX(SVM_EXIT_HLT),
Joerg Roedelf6511932011-04-04 12:39:35 +02005802 [x86_intercept_in] = POST_EX(SVM_EXIT_IOIO),
5803 [x86_intercept_ins] = POST_EX(SVM_EXIT_IOIO),
5804 [x86_intercept_out] = POST_EX(SVM_EXIT_IOIO),
5805 [x86_intercept_outs] = POST_EX(SVM_EXIT_IOIO),
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005806};
5807
Joerg Roedel80612522011-04-04 12:39:33 +02005808#undef PRE_EX
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005809#undef POST_EX
Joerg Roedeld7eb8202011-04-04 12:39:32 +02005810#undef POST_MEM
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005811
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02005812static int svm_check_intercept(struct kvm_vcpu *vcpu,
5813 struct x86_instruction_info *info,
5814 enum x86_intercept_stage stage)
5815{
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005816 struct vcpu_svm *svm = to_svm(vcpu);
5817 int vmexit, ret = X86EMUL_CONTINUE;
5818 struct __x86_intercept icpt_info;
5819 struct vmcb *vmcb = svm->vmcb;
5820
5821 if (info->intercept >= ARRAY_SIZE(x86_intercept_map))
5822 goto out;
5823
5824 icpt_info = x86_intercept_map[info->intercept];
5825
Avi Kivity40e19b52011-04-21 12:35:41 +03005826 if (stage != icpt_info.stage)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005827 goto out;
5828
5829 switch (icpt_info.exit_code) {
5830 case SVM_EXIT_READ_CR0:
5831 if (info->intercept == x86_intercept_cr_read)
5832 icpt_info.exit_code += info->modrm_reg;
5833 break;
5834 case SVM_EXIT_WRITE_CR0: {
5835 unsigned long cr0, val;
5836 u64 intercept;
5837
5838 if (info->intercept == x86_intercept_cr_write)
5839 icpt_info.exit_code += info->modrm_reg;
5840
Jan Kiszka62baf442014-06-29 21:55:53 +02005841 if (icpt_info.exit_code != SVM_EXIT_WRITE_CR0 ||
5842 info->intercept == x86_intercept_clts)
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005843 break;
5844
5845 intercept = svm->nested.intercept;
5846
5847 if (!(intercept & (1ULL << INTERCEPT_SELECTIVE_CR0)))
5848 break;
5849
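		/*
		 * The selective CR0 write intercept only fires when bits
		 * outside SVM_CR0_SELECTIVE_MASK change, so compare old and
		 * new values with those bits masked off.
		 */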
5850 cr0 = vcpu->arch.cr0 & ~SVM_CR0_SELECTIVE_MASK;
5851 val = info->src_val & ~SVM_CR0_SELECTIVE_MASK;
5852
5853 if (info->intercept == x86_intercept_lmsw) {
5854 cr0 &= 0xfUL;
5855 val &= 0xfUL;
5856 /* lmsw can't clear PE - catch this here */
5857 if (cr0 & X86_CR0_PE)
5858 val |= X86_CR0_PE;
5859 }
5860
5861 if (cr0 ^ val)
5862 icpt_info.exit_code = SVM_EXIT_CR0_SEL_WRITE;
5863
5864 break;
5865 }
Joerg Roedel3b88e412011-04-04 12:39:29 +02005866 case SVM_EXIT_READ_DR0:
5867 case SVM_EXIT_WRITE_DR0:
5868 icpt_info.exit_code += info->modrm_reg;
5869 break;
Joerg Roedel80612522011-04-04 12:39:33 +02005870 case SVM_EXIT_MSR:
5871 if (info->intercept == x86_intercept_wrmsr)
5872 vmcb->control.exit_info_1 = 1;
5873 else
5874 vmcb->control.exit_info_1 = 0;
5875 break;
Joerg Roedelbf608f82011-04-04 12:39:34 +02005876 case SVM_EXIT_PAUSE:
5877 /*
5878		 * We get this intercept for plain NOP as well, but PAUSE is
5879		 * REP NOP, so check for the REP prefix here.
5880 */
5881 if (info->rep_prefix != REPE_PREFIX)
5882 goto out;
Jan H. Schönherr49a8afc2017-09-05 23:58:44 +02005883 break;
Joerg Roedelf6511932011-04-04 12:39:35 +02005884 case SVM_EXIT_IOIO: {
5885 u64 exit_info;
5886 u32 bytes;
5887
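		/*
		 * Assemble exit_info_1 in the hardware IOIO-intercept
		 * format: port number in bits 31:16 plus type, string,
		 * rep and operand-size flags.
		 */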
Joerg Roedelf6511932011-04-04 12:39:35 +02005888 if (info->intercept == x86_intercept_in ||
5889 info->intercept == x86_intercept_ins) {
Jan Kiszka6cbc5f52014-06-30 12:52:55 +02005890 exit_info = ((info->src_val & 0xffff) << 16) |
5891 SVM_IOIO_TYPE_MASK;
Joerg Roedelf6511932011-04-04 12:39:35 +02005892 bytes = info->dst_bytes;
Jan Kiszka6493f152014-06-30 11:07:05 +02005893 } else {
Jan Kiszka6cbc5f52014-06-30 12:52:55 +02005894 exit_info = (info->dst_val & 0xffff) << 16;
Jan Kiszka6493f152014-06-30 11:07:05 +02005895 bytes = info->src_bytes;
Joerg Roedelf6511932011-04-04 12:39:35 +02005896 }
5897
5898 if (info->intercept == x86_intercept_outs ||
5899 info->intercept == x86_intercept_ins)
5900 exit_info |= SVM_IOIO_STR_MASK;
5901
5902 if (info->rep_prefix)
5903 exit_info |= SVM_IOIO_REP_MASK;
5904
5905 bytes = min(bytes, 4u);
5906
5907 exit_info |= bytes << SVM_IOIO_SIZE_SHIFT;
5908
5909 exit_info |= (u32)info->ad_bytes << (SVM_IOIO_ASIZE_SHIFT - 1);
5910
5911 vmcb->control.exit_info_1 = exit_info;
5912 vmcb->control.exit_info_2 = info->next_rip;
5913
5914 break;
5915 }
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005916 default:
5917 break;
5918 }
5919
Bandan Dasf1047652015-06-11 02:05:33 -04005920 /* TODO: Advertise NRIPS to guest hypervisor unconditionally */
5921 if (static_cpu_has(X86_FEATURE_NRIPS))
5922 vmcb->control.next_rip = info->next_rip;
Joerg Roedelcfec82c2011-04-04 12:39:28 +02005923 vmcb->control.exit_code = icpt_info.exit_code;
5924 vmexit = nested_svm_exit_handled(svm);
5925
5926 ret = (vmexit == NESTED_EXIT_DONE) ? X86EMUL_INTERCEPTED
5927 : X86EMUL_CONTINUE;
5928
5929out:
5930 return ret;
Joerg Roedel8a76d7f2011-04-04 12:39:27 +02005931}
5932
Yang Zhanga547c6d2013-04-11 19:25:10 +08005933static void svm_handle_external_intr(struct kvm_vcpu *vcpu)
5934{
5935 local_irq_enable();
Paolo Bonzinif2485b32016-06-15 15:23:11 +02005936 /*
5937 * We must have an instruction with interrupts enabled, so
5938 * the timer interrupt isn't delayed by the interrupt shadow.
5939 */
5940 asm("nop");
5941 local_irq_disable();
Yang Zhanga547c6d2013-04-11 19:25:10 +08005942}
5943
Radim Krčmářae97a3b2014-08-21 18:08:06 +02005944static void svm_sched_in(struct kvm_vcpu *vcpu, int cpu)
5945{
5946}
5947
Suravee Suthikulpanitbe8ca172016-05-04 14:09:49 -05005948static inline void avic_post_state_restore(struct kvm_vcpu *vcpu)
5949{
5950 if (avic_handle_apic_id_update(vcpu) != 0)
5951 return;
5952 if (avic_handle_dfr_update(vcpu) != 0)
5953 return;
5954 avic_handle_ldr_update(vcpu);
5955}
5956
Borislav Petkov74f16902017-03-26 23:51:24 +02005957static void svm_setup_mce(struct kvm_vcpu *vcpu)
5958{
5959 /* [63:9] are reserved. */
5960 vcpu->arch.mcg_cap &= 0x1ff;
5961}
5962
Ladi Prosek72d7b372017-10-11 16:54:41 +02005963static int svm_smi_allowed(struct kvm_vcpu *vcpu)
5964{
Ladi Prosek05cade72017-10-11 16:54:45 +02005965 struct vcpu_svm *svm = to_svm(vcpu);
5966
5967 /* Per APM Vol.2 15.22.2 "Response to SMI" */
5968 if (!gif_set(svm))
5969 return 0;
5970
5971 if (is_guest_mode(&svm->vcpu) &&
5972 svm->nested.intercept & (1ULL << INTERCEPT_SMI)) {
5973 /* TODO: Might need to set exit_info_1 and exit_info_2 here */
5974 svm->vmcb->control.exit_code = SVM_EXIT_SMI;
5975 svm->nested.exit_required = true;
5976 return 0;
5977 }
5978
Ladi Prosek72d7b372017-10-11 16:54:41 +02005979 return 1;
5980}
5981
Ladi Prosek0234bf82017-10-11 16:54:40 +02005982static int svm_pre_enter_smm(struct kvm_vcpu *vcpu, char *smstate)
5983{
Ladi Prosek05cade72017-10-11 16:54:45 +02005984 struct vcpu_svm *svm = to_svm(vcpu);
5985 int ret;
5986
5987 if (is_guest_mode(vcpu)) {
5988 /* FED8h - SVM Guest */
5989 put_smstate(u64, smstate, 0x7ed8, 1);
5990 /* FEE0h - SVM Guest VMCB Physical Address */
5991 put_smstate(u64, smstate, 0x7ee0, svm->nested.vmcb);
5992
5993 svm->vmcb->save.rax = vcpu->arch.regs[VCPU_REGS_RAX];
5994 svm->vmcb->save.rsp = vcpu->arch.regs[VCPU_REGS_RSP];
5995 svm->vmcb->save.rip = vcpu->arch.regs[VCPU_REGS_RIP];
5996
5997 ret = nested_svm_vmexit(svm);
5998 if (ret)
5999 return ret;
6000 }
Ladi Prosek0234bf82017-10-11 16:54:40 +02006001 return 0;
6002}
6003
6004static int svm_pre_leave_smm(struct kvm_vcpu *vcpu, u64 smbase)
6005{
Ladi Prosek05cade72017-10-11 16:54:45 +02006006 struct vcpu_svm *svm = to_svm(vcpu);
6007 struct vmcb *nested_vmcb;
6008 struct page *page;
6009 struct {
6010 u64 guest;
6011 u64 vmcb;
6012 } svm_state_save;
6013 int ret;
6014
6015 ret = kvm_vcpu_read_guest(vcpu, smbase + 0xfed8, &svm_state_save,
6016 sizeof(svm_state_save));
6017 if (ret)
6018 return ret;
6019
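	/*
	 * SMRAM offsets FED8h/FEE0h were written by svm_pre_enter_smm();
	 * if the "SVM guest" flag is set, re-enter the nested guest from
	 * the saved VMCB on RSM.
	 */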
6020 if (svm_state_save.guest) {
6021 vcpu->arch.hflags &= ~HF_SMM_MASK;
6022 nested_vmcb = nested_svm_map(svm, svm_state_save.vmcb, &page);
6023 if (nested_vmcb)
6024 enter_svm_guest_mode(svm, svm_state_save.vmcb, nested_vmcb, page);
6025 else
6026 ret = 1;
6027 vcpu->arch.hflags |= HF_SMM_MASK;
6028 }
6029 return ret;
Ladi Prosek0234bf82017-10-11 16:54:40 +02006030}
6031
Ladi Prosekcc3d9672017-10-17 16:02:39 +02006032static int enable_smi_window(struct kvm_vcpu *vcpu)
6033{
6034 struct vcpu_svm *svm = to_svm(vcpu);
6035
6036 if (!gif_set(svm)) {
6037 if (vgif_enabled(svm))
6038 set_intercept(svm, INTERCEPT_STGI);
6039 /* STGI will cause a vm exit */
6040 return 1;
6041 }
6042 return 0;
6043}
6044
Brijesh Singh1654efc2017-12-04 10:57:34 -06006045static int sev_asid_new(void)
6046{
6047 int pos;
6048
6049 /*
6050 * SEV-enabled guest must use asid from min_sev_asid to max_sev_asid.
6051 */
6052 pos = find_next_zero_bit(sev_asid_bitmap, max_sev_asid, min_sev_asid - 1);
6053 if (pos >= max_sev_asid)
6054 return -EBUSY;
6055
6056 set_bit(pos, sev_asid_bitmap);
6057 return pos + 1;
6058}
6059
6060static int sev_guest_init(struct kvm *kvm, struct kvm_sev_cmd *argp)
6061{
6062 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6063 int asid, ret;
6064
6065 ret = -EBUSY;
6066 asid = sev_asid_new();
6067 if (asid < 0)
6068 return ret;
6069
6070 ret = sev_platform_init(&argp->error);
6071 if (ret)
6072 goto e_free;
6073
6074 sev->active = true;
6075 sev->asid = asid;
Brijesh Singh1e80fdc2017-12-04 10:57:38 -06006076 INIT_LIST_HEAD(&sev->regions_list);
Brijesh Singh1654efc2017-12-04 10:57:34 -06006077
6078 return 0;
6079
6080e_free:
6081 __sev_asid_free(asid);
6082 return ret;
6083}
6084
Brijesh Singh59414c92017-12-04 10:57:35 -06006085static int sev_bind_asid(struct kvm *kvm, unsigned int handle, int *error)
6086{
6087 struct sev_data_activate *data;
6088 int asid = sev_get_asid(kvm);
6089 int ret;
6090
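	/*
	 * The ASID may have been used by a previous guest; write back and
	 * invalidate caches on all CPUs and issue DF_FLUSH so no stale
	 * data tagged with an earlier use of this ASID survives.
	 */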
6091 wbinvd_on_all_cpus();
6092
6093 ret = sev_guest_df_flush(error);
6094 if (ret)
6095 return ret;
6096
6097 data = kzalloc(sizeof(*data), GFP_KERNEL);
6098 if (!data)
6099 return -ENOMEM;
6100
6101 /* activate ASID on the given handle */
6102 data->handle = handle;
6103 data->asid = asid;
6104 ret = sev_guest_activate(data, error);
6105 kfree(data);
6106
6107 return ret;
6108}
6109
Brijesh Singh89c50582017-12-04 10:57:35 -06006110static int __sev_issue_cmd(int fd, int id, void *data, int *error)
Brijesh Singh59414c92017-12-04 10:57:35 -06006111{
6112 struct fd f;
6113 int ret;
6114
6115 f = fdget(fd);
6116 if (!f.file)
6117 return -EBADF;
6118
6119 ret = sev_issue_cmd_external_user(f.file, id, data, error);
6120
6121 fdput(f);
6122 return ret;
6123}
6124
Brijesh Singh89c50582017-12-04 10:57:35 -06006125static int sev_issue_cmd(struct kvm *kvm, int id, void *data, int *error)
6126{
6127 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6128
6129 return __sev_issue_cmd(sev->fd, id, data, error);
6130}
6131
Brijesh Singh59414c92017-12-04 10:57:35 -06006132static int sev_launch_start(struct kvm *kvm, struct kvm_sev_cmd *argp)
6133{
6134 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6135 struct sev_data_launch_start *start;
6136 struct kvm_sev_launch_start params;
6137 void *dh_blob, *session_blob;
6138 int *error = &argp->error;
6139 int ret;
6140
6141 if (!sev_guest(kvm))
6142 return -ENOTTY;
6143
6144 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6145 return -EFAULT;
6146
6147 start = kzalloc(sizeof(*start), GFP_KERNEL);
6148 if (!start)
6149 return -ENOMEM;
6150
6151 dh_blob = NULL;
6152 if (params.dh_uaddr) {
6153 dh_blob = psp_copy_user_blob(params.dh_uaddr, params.dh_len);
6154 if (IS_ERR(dh_blob)) {
6155 ret = PTR_ERR(dh_blob);
6156 goto e_free;
6157 }
6158
6159 start->dh_cert_address = __sme_set(__pa(dh_blob));
6160 start->dh_cert_len = params.dh_len;
6161 }
6162
6163 session_blob = NULL;
6164 if (params.session_uaddr) {
6165 session_blob = psp_copy_user_blob(params.session_uaddr, params.session_len);
6166 if (IS_ERR(session_blob)) {
6167 ret = PTR_ERR(session_blob);
6168 goto e_free_dh;
6169 }
6170
6171 start->session_address = __sme_set(__pa(session_blob));
6172 start->session_len = params.session_len;
6173 }
6174
6175 start->handle = params.handle;
6176 start->policy = params.policy;
6177
6178 /* create memory encryption context */
Brijesh Singh89c50582017-12-04 10:57:35 -06006179 ret = __sev_issue_cmd(argp->sev_fd, SEV_CMD_LAUNCH_START, start, error);
Brijesh Singh59414c92017-12-04 10:57:35 -06006180 if (ret)
6181 goto e_free_session;
6182
6183 /* Bind ASID to this guest */
6184 ret = sev_bind_asid(kvm, start->handle, error);
6185 if (ret)
6186 goto e_free_session;
6187
6188 /* return handle to userspace */
6189 params.handle = start->handle;
6190 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params))) {
6191 sev_unbind_asid(kvm, start->handle);
6192 ret = -EFAULT;
6193 goto e_free_session;
6194 }
6195
6196 sev->handle = start->handle;
6197 sev->fd = argp->sev_fd;
6198
6199e_free_session:
6200 kfree(session_blob);
6201e_free_dh:
6202 kfree(dh_blob);
6203e_free:
6204 kfree(start);
6205 return ret;
6206}
6207
Brijesh Singh89c50582017-12-04 10:57:35 -06006208static int get_num_contig_pages(int idx, struct page **inpages,
6209 unsigned long npages)
6210{
6211 unsigned long paddr, next_paddr;
6212 int i = idx + 1, pages = 1;
6213
6214 /* find the number of contiguous pages starting from idx */
6215 paddr = __sme_page_pa(inpages[idx]);
6216 while (i < npages) {
6217 next_paddr = __sme_page_pa(inpages[i++]);
6218 if ((paddr + PAGE_SIZE) == next_paddr) {
6219 pages++;
6220 paddr = next_paddr;
6221 continue;
6222 }
6223 break;
6224 }
6225
6226 return pages;
6227}
6228
6229static int sev_launch_update_data(struct kvm *kvm, struct kvm_sev_cmd *argp)
6230{
6231 unsigned long vaddr, vaddr_end, next_vaddr, npages, size;
6232 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6233 struct kvm_sev_launch_update_data params;
6234 struct sev_data_launch_update_data *data;
6235 struct page **inpages;
6236 int i, ret, pages;
6237
6238 if (!sev_guest(kvm))
6239 return -ENOTTY;
6240
6241 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6242 return -EFAULT;
6243
6244 data = kzalloc(sizeof(*data), GFP_KERNEL);
6245 if (!data)
6246 return -ENOMEM;
6247
6248 vaddr = params.uaddr;
6249 size = params.len;
6250 vaddr_end = vaddr + size;
6251
6252 /* Lock the user memory. */
6253 inpages = sev_pin_memory(kvm, vaddr, size, &npages, 1);
6254 if (!inpages) {
6255 ret = -ENOMEM;
6256 goto e_free;
6257 }
6258
6259 /*
6260 * The LAUNCH_UPDATE command will perform in-place encryption of the
6261	 * memory content (i.e. it will write the same memory region with C=1).
6262	 * It's possible that the cache may contain the data with C=0, i.e.,
6263	 * unencrypted, so invalidate it first.
6264 */
6265 sev_clflush_pages(inpages, npages);
6266
6267 for (i = 0; vaddr < vaddr_end; vaddr = next_vaddr, i += pages) {
6268 int offset, len;
6269
6270 /*
6271 * If the user buffer is not page-aligned, calculate the offset
6272 * within the page.
6273 */
6274 offset = vaddr & (PAGE_SIZE - 1);
6275
6276 /* Calculate the number of pages that can be encrypted in one go. */
6277 pages = get_num_contig_pages(i, inpages, npages);
6278
6279 len = min_t(size_t, ((pages * PAGE_SIZE) - offset), size);
6280
6281 data->handle = sev->handle;
6282 data->len = len;
6283 data->address = __sme_page_pa(inpages[i]) + offset;
6284 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_DATA, data, &argp->error);
6285 if (ret)
6286 goto e_unpin;
6287
6288 size -= len;
6289 next_vaddr = vaddr + len;
6290 }
6291
6292e_unpin:
6293	/* the memory content was updated, mark the pages dirty */
6294 for (i = 0; i < npages; i++) {
6295 set_page_dirty_lock(inpages[i]);
6296 mark_page_accessed(inpages[i]);
6297 }
6298 /* unlock the user pages */
6299 sev_unpin_memory(kvm, inpages, npages);
6300e_free:
6301 kfree(data);
6302 return ret;
6303}
6304
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006305static int sev_launch_measure(struct kvm *kvm, struct kvm_sev_cmd *argp)
6306{
Brijesh Singh3e233382018-02-23 12:36:50 -06006307 void __user *measure = (void __user *)(uintptr_t)argp->data;
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006308 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6309 struct sev_data_launch_measure *data;
6310 struct kvm_sev_launch_measure params;
Brijesh Singh3e233382018-02-23 12:36:50 -06006311 void __user *p = NULL;
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006312 void *blob = NULL;
6313 int ret;
6314
6315 if (!sev_guest(kvm))
6316 return -ENOTTY;
6317
Brijesh Singh3e233382018-02-23 12:36:50 -06006318 if (copy_from_user(&params, measure, sizeof(params)))
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006319 return -EFAULT;
6320
6321 data = kzalloc(sizeof(*data), GFP_KERNEL);
6322 if (!data)
6323 return -ENOMEM;
6324
6325 /* User wants to query the blob length */
6326 if (!params.len)
6327 goto cmd;
6328
Brijesh Singh3e233382018-02-23 12:36:50 -06006329 p = (void __user *)(uintptr_t)params.uaddr;
6330 if (p) {
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006331 if (params.len > SEV_FW_BLOB_MAX_SIZE) {
6332 ret = -EINVAL;
6333 goto e_free;
6334 }
6335
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006336 ret = -ENOMEM;
6337 blob = kmalloc(params.len, GFP_KERNEL);
6338 if (!blob)
6339 goto e_free;
6340
6341 data->address = __psp_pa(blob);
6342 data->len = params.len;
6343 }
6344
6345cmd:
6346 data->handle = sev->handle;
6347 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_MEASURE, data, &argp->error);
6348
6349 /*
6350	 * If we only queried the blob length, the FW responded with the expected data.
6351 */
6352 if (!params.len)
6353 goto done;
6354
6355 if (ret)
6356 goto e_free_blob;
6357
6358 if (blob) {
Brijesh Singh3e233382018-02-23 12:36:50 -06006359 if (copy_to_user(p, blob, params.len))
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006360 ret = -EFAULT;
6361 }
6362
6363done:
6364 params.len = data->len;
Brijesh Singh3e233382018-02-23 12:36:50 -06006365 if (copy_to_user(measure, &params, sizeof(params)))
Brijesh Singh0d0736f2017-12-04 10:57:36 -06006366 ret = -EFAULT;
6367e_free_blob:
6368 kfree(blob);
6369e_free:
6370 kfree(data);
6371 return ret;
6372}
6373
Brijesh Singh5bdb0e22017-12-04 10:57:36 -06006374static int sev_launch_finish(struct kvm *kvm, struct kvm_sev_cmd *argp)
6375{
6376 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6377 struct sev_data_launch_finish *data;
6378 int ret;
6379
6380 if (!sev_guest(kvm))
6381 return -ENOTTY;
6382
6383 data = kzalloc(sizeof(*data), GFP_KERNEL);
6384 if (!data)
6385 return -ENOMEM;
6386
6387 data->handle = sev->handle;
6388 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_FINISH, data, &argp->error);
6389
6390 kfree(data);
6391 return ret;
6392}
6393
Brijesh Singh255d9e72017-12-04 10:57:37 -06006394static int sev_guest_status(struct kvm *kvm, struct kvm_sev_cmd *argp)
6395{
6396 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6397 struct kvm_sev_guest_status params;
6398 struct sev_data_guest_status *data;
6399 int ret;
6400
6401 if (!sev_guest(kvm))
6402 return -ENOTTY;
6403
6404 data = kzalloc(sizeof(*data), GFP_KERNEL);
6405 if (!data)
6406 return -ENOMEM;
6407
6408 data->handle = sev->handle;
6409 ret = sev_issue_cmd(kvm, SEV_CMD_GUEST_STATUS, data, &argp->error);
6410 if (ret)
6411 goto e_free;
6412
6413 params.policy = data->policy;
6414 params.state = data->state;
6415 params.handle = data->handle;
6416
6417 if (copy_to_user((void __user *)(uintptr_t)argp->data, &params, sizeof(params)))
6418 ret = -EFAULT;
6419e_free:
6420 kfree(data);
6421 return ret;
6422}
6423
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006424static int __sev_issue_dbg_cmd(struct kvm *kvm, unsigned long src,
6425 unsigned long dst, int size,
6426 int *error, bool enc)
6427{
6428 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6429 struct sev_data_dbg *data;
6430 int ret;
6431
6432 data = kzalloc(sizeof(*data), GFP_KERNEL);
6433 if (!data)
6434 return -ENOMEM;
6435
6436 data->handle = sev->handle;
6437 data->dst_addr = dst;
6438 data->src_addr = src;
6439 data->len = size;
6440
6441 ret = sev_issue_cmd(kvm,
6442 enc ? SEV_CMD_DBG_ENCRYPT : SEV_CMD_DBG_DECRYPT,
6443 data, error);
6444 kfree(data);
6445 return ret;
6446}
6447
6448static int __sev_dbg_decrypt(struct kvm *kvm, unsigned long src_paddr,
6449 unsigned long dst_paddr, int sz, int *err)
6450{
6451 int offset;
6452
6453 /*
6454	 * It's safe to read more than we were asked; the caller should ensure
6455	 * that the destination has enough space.
6456 */
6457 src_paddr = round_down(src_paddr, 16);
6458 offset = src_paddr & 15;
6459 sz = round_up(sz + offset, 16);
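	/*
	 * Example: src_paddr 0x1009 with sz 0x20 rounds to a 0x30-byte
	 * read starting at 0x1000 (offset 9), which still covers the
	 * whole requested range.
	 */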
6460
6461 return __sev_issue_dbg_cmd(kvm, src_paddr, dst_paddr, sz, err, false);
6462}
6463
6464static int __sev_dbg_decrypt_user(struct kvm *kvm, unsigned long paddr,
6465 unsigned long __user dst_uaddr,
6466 unsigned long dst_paddr,
6467 int size, int *err)
6468{
6469 struct page *tpage = NULL;
6470 int ret, offset;
6471
6472	/* if inputs are not 16-byte aligned then use an intermediate buffer */
6473 if (!IS_ALIGNED(dst_paddr, 16) ||
6474 !IS_ALIGNED(paddr, 16) ||
6475 !IS_ALIGNED(size, 16)) {
6476 tpage = (void *)alloc_page(GFP_KERNEL);
6477 if (!tpage)
6478 return -ENOMEM;
6479
6480 dst_paddr = __sme_page_pa(tpage);
6481 }
6482
6483 ret = __sev_dbg_decrypt(kvm, paddr, dst_paddr, size, err);
6484 if (ret)
6485 goto e_free;
6486
6487 if (tpage) {
6488 offset = paddr & 15;
6489 if (copy_to_user((void __user *)(uintptr_t)dst_uaddr,
6490 page_address(tpage) + offset, size))
6491 ret = -EFAULT;
6492 }
6493
6494e_free:
6495 if (tpage)
6496 __free_page(tpage);
6497
6498 return ret;
6499}
6500
Brijesh Singh7d1594f2017-12-04 10:57:37 -06006501static int __sev_dbg_encrypt_user(struct kvm *kvm, unsigned long paddr,
6502 unsigned long __user vaddr,
6503 unsigned long dst_paddr,
6504 unsigned long __user dst_vaddr,
6505 int size, int *error)
6506{
6507 struct page *src_tpage = NULL;
6508 struct page *dst_tpage = NULL;
6509 int ret, len = size;
6510
6511 /* If source buffer is not aligned then use an intermediate buffer */
6512 if (!IS_ALIGNED(vaddr, 16)) {
6513 src_tpage = alloc_page(GFP_KERNEL);
6514 if (!src_tpage)
6515 return -ENOMEM;
6516
6517 if (copy_from_user(page_address(src_tpage),
6518 (void __user *)(uintptr_t)vaddr, size)) {
6519 __free_page(src_tpage);
6520 return -EFAULT;
6521 }
6522
6523 paddr = __sme_page_pa(src_tpage);
6524 }
6525
6526 /*
6527 * If destination buffer or length is not aligned then do read-modify-write:
6528 * - decrypt destination in an intermediate buffer
6529 * - copy the source buffer in an intermediate buffer
6530 * - use the intermediate buffer as source buffer
6531 */
6532 if (!IS_ALIGNED(dst_vaddr, 16) || !IS_ALIGNED(size, 16)) {
6533 int dst_offset;
6534
6535 dst_tpage = alloc_page(GFP_KERNEL);
6536 if (!dst_tpage) {
6537 ret = -ENOMEM;
6538 goto e_free;
6539 }
6540
6541 ret = __sev_dbg_decrypt(kvm, dst_paddr,
6542 __sme_page_pa(dst_tpage), size, error);
6543 if (ret)
6544 goto e_free;
6545
6546 /*
6547		 * If the source is a kernel buffer then use memcpy(); otherwise
6548		 * use copy_from_user().
6549 */
6550 dst_offset = dst_paddr & 15;
6551
6552 if (src_tpage)
6553 memcpy(page_address(dst_tpage) + dst_offset,
6554 page_address(src_tpage), size);
6555 else {
6556 if (copy_from_user(page_address(dst_tpage) + dst_offset,
6557 (void __user *)(uintptr_t)vaddr, size)) {
6558 ret = -EFAULT;
6559 goto e_free;
6560 }
6561 }
6562
6563 paddr = __sme_page_pa(dst_tpage);
6564 dst_paddr = round_down(dst_paddr, 16);
6565 len = round_up(size, 16);
6566 }
6567
6568 ret = __sev_issue_dbg_cmd(kvm, paddr, dst_paddr, len, error, true);
6569
6570e_free:
6571 if (src_tpage)
6572 __free_page(src_tpage);
6573 if (dst_tpage)
6574 __free_page(dst_tpage);
6575 return ret;
6576}
6577
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006578static int sev_dbg_crypt(struct kvm *kvm, struct kvm_sev_cmd *argp, bool dec)
6579{
6580 unsigned long vaddr, vaddr_end, next_vaddr;
6581 unsigned long dst_vaddr, dst_vaddr_end;
6582 struct page **src_p, **dst_p;
6583 struct kvm_sev_dbg debug;
6584 unsigned long n;
6585 int ret, size;
6586
6587 if (!sev_guest(kvm))
6588 return -ENOTTY;
6589
6590 if (copy_from_user(&debug, (void __user *)(uintptr_t)argp->data, sizeof(debug)))
6591 return -EFAULT;
6592
6593 vaddr = debug.src_uaddr;
6594 size = debug.len;
6595 vaddr_end = vaddr + size;
6596 dst_vaddr = debug.dst_uaddr;
6597 dst_vaddr_end = dst_vaddr + size;
6598
6599 for (; vaddr < vaddr_end; vaddr = next_vaddr) {
6600 int len, s_off, d_off;
6601
6602 /* lock userspace source and destination page */
6603 src_p = sev_pin_memory(kvm, vaddr & PAGE_MASK, PAGE_SIZE, &n, 0);
6604 if (!src_p)
6605 return -EFAULT;
6606
6607 dst_p = sev_pin_memory(kvm, dst_vaddr & PAGE_MASK, PAGE_SIZE, &n, 1);
6608 if (!dst_p) {
6609 sev_unpin_memory(kvm, src_p, n);
6610 return -EFAULT;
6611 }
6612
6613 /*
6614 * The DBG_{DE,EN}CRYPT commands will perform {dec,en}cryption of the
6615		 * memory content (i.e. it will write the same memory region with C=1).
6616		 * It's possible that the cache may contain the data with C=0, i.e.,
6617		 * unencrypted, so invalidate it first.
6618 */
6619 sev_clflush_pages(src_p, 1);
6620 sev_clflush_pages(dst_p, 1);
6621
6622 /*
6623		 * Since the user buffer may not be page-aligned, calculate the
6624 * offset within the page.
6625 */
6626 s_off = vaddr & ~PAGE_MASK;
6627 d_off = dst_vaddr & ~PAGE_MASK;
6628 len = min_t(size_t, (PAGE_SIZE - s_off), size);
6629
Brijesh Singh7d1594f2017-12-04 10:57:37 -06006630 if (dec)
6631 ret = __sev_dbg_decrypt_user(kvm,
6632 __sme_page_pa(src_p[0]) + s_off,
6633 dst_vaddr,
6634 __sme_page_pa(dst_p[0]) + d_off,
6635 len, &argp->error);
6636 else
6637 ret = __sev_dbg_encrypt_user(kvm,
6638 __sme_page_pa(src_p[0]) + s_off,
6639 vaddr,
6640 __sme_page_pa(dst_p[0]) + d_off,
6641 dst_vaddr,
6642 len, &argp->error);
Brijesh Singh24f41fb2017-12-04 10:57:37 -06006643
6644 sev_unpin_memory(kvm, src_p, 1);
6645 sev_unpin_memory(kvm, dst_p, 1);
6646
6647 if (ret)
6648 goto err;
6649
6650 next_vaddr = vaddr + len;
6651 dst_vaddr = dst_vaddr + len;
6652 size -= len;
6653 }
6654err:
6655 return ret;
6656}
6657
Brijesh Singh9f5b5b92017-12-04 10:57:38 -06006658static int sev_launch_secret(struct kvm *kvm, struct kvm_sev_cmd *argp)
6659{
6660 struct kvm_sev_info *sev = &kvm->arch.sev_info;
6661 struct sev_data_launch_secret *data;
6662 struct kvm_sev_launch_secret params;
6663 struct page **pages;
6664 void *blob, *hdr;
6665 unsigned long n;
Brijesh Singh9c5e0af2018-02-19 10:13:25 -06006666 int ret, offset;
Brijesh Singh9f5b5b92017-12-04 10:57:38 -06006667
6668 if (!sev_guest(kvm))
6669 return -ENOTTY;
6670
6671 if (copy_from_user(&params, (void __user *)(uintptr_t)argp->data, sizeof(params)))
6672 return -EFAULT;
6673
6674 pages = sev_pin_memory(kvm, params.guest_uaddr, params.guest_len, &n, 1);
6675 if (!pages)
6676 return -ENOMEM;
6677
6678 /*
6679	 * The secret must be copied into a contiguous memory region; verify
6680	 * that the userspace memory pages are contiguous before we issue the command.
6681 */
6682 if (get_num_contig_pages(0, pages, n) != n) {
6683 ret = -EINVAL;
6684 goto e_unpin_memory;
6685 }
6686
6687 ret = -ENOMEM;
6688 data = kzalloc(sizeof(*data), GFP_KERNEL);
6689 if (!data)
6690 goto e_unpin_memory;
6691
Brijesh Singh9c5e0af2018-02-19 10:13:25 -06006692 offset = params.guest_uaddr & (PAGE_SIZE - 1);
6693 data->guest_address = __sme_page_pa(pages[0]) + offset;
6694 data->guest_len = params.guest_len;
6695
Brijesh Singh9f5b5b92017-12-04 10:57:38 -06006696 blob = psp_copy_user_blob(params.trans_uaddr, params.trans_len);
6697 if (IS_ERR(blob)) {
6698 ret = PTR_ERR(blob);
6699 goto e_free;
6700 }
6701
6702 data->trans_address = __psp_pa(blob);
6703 data->trans_len = params.trans_len;
6704
6705 hdr = psp_copy_user_blob(params.hdr_uaddr, params.hdr_len);
6706 if (IS_ERR(hdr)) {
6707 ret = PTR_ERR(hdr);
6708 goto e_free_blob;
6709 }
Brijesh Singh9c5e0af2018-02-19 10:13:25 -06006710 data->hdr_address = __psp_pa(hdr);
6711 data->hdr_len = params.hdr_len;
Brijesh Singh9f5b5b92017-12-04 10:57:38 -06006712
6713 data->handle = sev->handle;
6714 ret = sev_issue_cmd(kvm, SEV_CMD_LAUNCH_UPDATE_SECRET, data, &argp->error);
6715
6716 kfree(hdr);
6717
6718e_free_blob:
6719 kfree(blob);
6720e_free:
6721 kfree(data);
6722e_unpin_memory:
6723 sev_unpin_memory(kvm, pages, n);
6724 return ret;
6725}
6726
static int svm_mem_enc_op(struct kvm *kvm, void __user *argp)
{
	struct kvm_sev_cmd sev_cmd;
	int r;

	if (!svm_sev_enabled())
		return -ENOTTY;

	if (copy_from_user(&sev_cmd, argp, sizeof(struct kvm_sev_cmd)))
		return -EFAULT;

	mutex_lock(&kvm->lock);

	switch (sev_cmd.id) {
	case KVM_SEV_INIT:
		r = sev_guest_init(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_START:
		r = sev_launch_start(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_UPDATE_DATA:
		r = sev_launch_update_data(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_MEASURE:
		r = sev_launch_measure(kvm, &sev_cmd);
		break;
	case KVM_SEV_LAUNCH_FINISH:
		r = sev_launch_finish(kvm, &sev_cmd);
		break;
	case KVM_SEV_GUEST_STATUS:
		r = sev_guest_status(kvm, &sev_cmd);
		break;
	case KVM_SEV_DBG_DECRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, true);
		break;
	case KVM_SEV_DBG_ENCRYPT:
		r = sev_dbg_crypt(kvm, &sev_cmd, false);
		break;
	case KVM_SEV_LAUNCH_SECRET:
		r = sev_launch_secret(kvm, &sev_cmd);
		break;
	default:
		r = -EINVAL;
		goto out;
	}

	if (copy_to_user(argp, &sev_cmd, sizeof(struct kvm_sev_cmd)))
		r = -EFAULT;

out:
	mutex_unlock(&kvm->lock);
	return r;
}

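/*
 * Back end for the KVM_MEMORY_ENCRYPT_REG_REGION ioctl: pin the given
 * userspace range in memory and track it on the per-VM regions_list so
 * it can be unpinned when it is unregistered or the VM is destroyed.
 */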
static int svm_register_enc_region(struct kvm *kvm,
				   struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;
	struct enc_region *region;
	int ret = 0;

	if (!sev_guest(kvm))
		return -ENOTTY;

	region = kzalloc(sizeof(*region), GFP_KERNEL);
	if (!region)
		return -ENOMEM;

	region->pages = sev_pin_memory(kvm, range->addr, range->size, &region->npages, 1);
	if (!region->pages) {
		ret = -ENOMEM;
		goto e_free;
	}

	/*
	 * The guest may change the memory encryption attribute from C=0 -> C=1
	 * or vice versa for this memory range. Make sure the caches are
	 * flushed so that guest data gets written into memory with the
	 * correct C-bit.
	 */
	sev_clflush_pages(region->pages, region->npages);

	region->uaddr = range->addr;
	region->size = range->size;

	mutex_lock(&kvm->lock);
	list_add_tail(&region->list, &sev->regions_list);
	mutex_unlock(&kvm->lock);

	return ret;

e_free:
	kfree(region);
	return ret;
}

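/* Look up a registered encrypted region by exact (uaddr, size) match. */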
static struct enc_region *
find_enc_region(struct kvm *kvm, struct kvm_enc_region *range)
{
	struct kvm_sev_info *sev = &kvm->arch.sev_info;
	struct list_head *head = &sev->regions_list;
	struct enc_region *i;

	list_for_each_entry(i, head, list) {
		if (i->uaddr == range->addr &&
		    i->size == range->size)
			return i;
	}

	return NULL;
}

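/*
 * Back end for the KVM_MEMORY_ENCRYPT_UNREG_REGION ioctl: find the
 * exactly-matching registered region, then unpin and free it under
 * kvm->lock.
 */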
static int svm_unregister_enc_region(struct kvm *kvm,
				     struct kvm_enc_region *range)
{
	struct enc_region *region;
	int ret;

	mutex_lock(&kvm->lock);

	if (!sev_guest(kvm)) {
		ret = -ENOTTY;
		goto failed;
	}

	region = find_enc_region(kvm, range);
	if (!region) {
		ret = -EINVAL;
		goto failed;
	}

	__unregister_enc_region_locked(kvm, region);

	mutex_unlock(&kvm->lock);
	return 0;

failed:
	mutex_unlock(&kvm->lock);
	return ret;
}

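/*
 * The kvm_x86_ops instance that wires the SVM implementations into the
 * generic x86 KVM core; registered via kvm_init() below.
 */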
static struct kvm_x86_ops svm_x86_ops __ro_after_init = {
	.cpu_has_kvm_support = has_svm,
	.disabled_by_bios = is_disabled,
	.hardware_setup = svm_hardware_setup,
	.hardware_unsetup = svm_hardware_unsetup,
	.check_processor_compatibility = svm_check_processor_compat,
	.hardware_enable = svm_hardware_enable,
	.hardware_disable = svm_hardware_disable,
	.cpu_has_accelerated_tpr = svm_cpu_has_accelerated_tpr,
	.cpu_has_high_real_mode_segbase = svm_has_high_real_mode_segbase,

	.vcpu_create = svm_create_vcpu,
	.vcpu_free = svm_free_vcpu,
	.vcpu_reset = svm_vcpu_reset,

	.vm_init = avic_vm_init,
	.vm_destroy = svm_vm_destroy,

	.prepare_guest_switch = svm_prepare_guest_switch,
	.vcpu_load = svm_vcpu_load,
	.vcpu_put = svm_vcpu_put,
	.vcpu_blocking = svm_vcpu_blocking,
	.vcpu_unblocking = svm_vcpu_unblocking,

	.update_bp_intercept = update_bp_intercept,
	.get_msr_feature = svm_get_msr_feature,
	.get_msr = svm_get_msr,
	.set_msr = svm_set_msr,
	.get_segment_base = svm_get_segment_base,
	.get_segment = svm_get_segment,
	.set_segment = svm_set_segment,
	.get_cpl = svm_get_cpl,
	.get_cs_db_l_bits = kvm_get_cs_db_l_bits,
	.decache_cr0_guest_bits = svm_decache_cr0_guest_bits,
	.decache_cr3 = svm_decache_cr3,
	.decache_cr4_guest_bits = svm_decache_cr4_guest_bits,
	.set_cr0 = svm_set_cr0,
	.set_cr3 = svm_set_cr3,
	.set_cr4 = svm_set_cr4,
	.set_efer = svm_set_efer,
	.get_idt = svm_get_idt,
	.set_idt = svm_set_idt,
	.get_gdt = svm_get_gdt,
	.set_gdt = svm_set_gdt,
	.get_dr6 = svm_get_dr6,
	.set_dr6 = svm_set_dr6,
	.set_dr7 = svm_set_dr7,
	.sync_dirty_debug_regs = svm_sync_dirty_debug_regs,
	.cache_reg = svm_cache_reg,
	.get_rflags = svm_get_rflags,
	.set_rflags = svm_set_rflags,

	.tlb_flush = svm_flush_tlb,

	.run = svm_vcpu_run,
	.handle_exit = handle_exit,
	.skip_emulated_instruction = skip_emulated_instruction,
	.set_interrupt_shadow = svm_set_interrupt_shadow,
	.get_interrupt_shadow = svm_get_interrupt_shadow,
	.patch_hypercall = svm_patch_hypercall,
	.set_irq = svm_set_irq,
	.set_nmi = svm_inject_nmi,
	.queue_exception = svm_queue_exception,
	.cancel_injection = svm_cancel_injection,
	.interrupt_allowed = svm_interrupt_allowed,
	.nmi_allowed = svm_nmi_allowed,
	.get_nmi_mask = svm_get_nmi_mask,
	.set_nmi_mask = svm_set_nmi_mask,
	.enable_nmi_window = enable_nmi_window,
	.enable_irq_window = enable_irq_window,
	.update_cr8_intercept = update_cr8_intercept,
	.set_virtual_x2apic_mode = svm_set_virtual_x2apic_mode,
	.get_enable_apicv = svm_get_enable_apicv,
	.refresh_apicv_exec_ctrl = svm_refresh_apicv_exec_ctrl,
	.load_eoi_exitmap = svm_load_eoi_exitmap,
	.hwapic_irr_update = svm_hwapic_irr_update,
	.hwapic_isr_update = svm_hwapic_isr_update,
	.sync_pir_to_irr = kvm_lapic_find_highest_irr,
	.apicv_post_state_restore = avic_post_state_restore,

	.set_tss_addr = svm_set_tss_addr,
	.get_tdp_level = get_npt_level,
	.get_mt_mask = svm_get_mt_mask,

	.get_exit_info = svm_get_exit_info,

	.get_lpage_level = svm_get_lpage_level,

	.cpuid_update = svm_cpuid_update,

	.rdtscp_supported = svm_rdtscp_supported,
	.invpcid_supported = svm_invpcid_supported,
	.mpx_supported = svm_mpx_supported,
	.xsaves_supported = svm_xsaves_supported,
	.umip_emulated = svm_umip_emulated,

	.set_supported_cpuid = svm_set_supported_cpuid,

	.has_wbinvd_exit = svm_has_wbinvd_exit,

	.write_tsc_offset = svm_write_tsc_offset,

	.set_tdp_cr3 = set_tdp_cr3,

	.check_intercept = svm_check_intercept,
	.handle_external_intr = svm_handle_external_intr,

	.sched_in = svm_sched_in,

	.pmu_ops = &amd_pmu_ops,
	.deliver_posted_interrupt = svm_deliver_avic_intr,
	.update_pi_irte = svm_update_pi_irte,
	.setup_mce = svm_setup_mce,

	.smi_allowed = svm_smi_allowed,
	.pre_enter_smm = svm_pre_enter_smm,
	.pre_leave_smm = svm_pre_leave_smm,
	.enable_smi_window = enable_smi_window,

	.mem_enc_op = svm_mem_enc_op,
	.mem_enc_reg_region = svm_register_enc_region,
	.mem_enc_unreg_region = svm_unregister_enc_region,
};

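/* Module init: hand the SVM ops table and vcpu_svm size/alignment to KVM. */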
static int __init svm_init(void)
{
	return kvm_init(&svm_x86_ops, sizeof(struct vcpu_svm),
			__alignof__(struct vcpu_svm), THIS_MODULE);
}

static void __exit svm_exit(void)
{
	kvm_exit();
}

module_init(svm_init)
module_exit(svm_exit)